ChipYTY commited on
Commit
4196369
·
verified ·
1 Parent(s): a517ecd

Update full code snapshot (exclude data and model checkpoints)

Browse files
Files changed (40) hide show
  1. .github/workflows/python-publish.yml +36 -0
  2. .github/workflows/test.yaml +24 -0
  3. .gitignore +170 -2
  4. README.md +151 -62
  5. examples/BABILONG_DATA_PIPELINE.md +177 -0
  6. examples/QWEN3_TITANS_MEMORY_INTEGRATION.md +169 -0
  7. examples/QWEN_TITANS_BABILONG_TRAINING_FLOW_CN.txt +272 -0
  8. examples/TITANS_ANALYSIS_CN.md +212 -0
  9. examples/eval_qwen_baseline.py +515 -0
  10. examples/outputs/qwen_titans_babilong_v4/eval_metrics.jsonl +12 -0
  11. examples/qwen_mac_integration.py +692 -0
  12. examples/qwen_titans_streaming.py +600 -0
  13. examples/qwen_with_titans_memory.py +487 -0
  14. examples/run_training.sh +46 -0
  15. examples/train_qwen_baseline_babilong_v4.py +1361 -0
  16. examples/train_qwen_titans_babilong.py +1664 -0
  17. examples/train_qwen_titans_babilong_v2.py +1573 -0
  18. examples/train_qwen_titans_babilong_v3.py +1683 -0
  19. fig1.png +3 -0
  20. fig2.png +3 -0
  21. outputs/freeze_base_500/eval_metrics.jsonl +1 -0
  22. outputs/freeze_base_500_v2/eval_metrics.jsonl +1 -0
  23. outputs/freeze_base_500_v4/eval_metrics.jsonl +1 -0
  24. outputs/freeze_base_500_v5/eval_metrics.jsonl +7 -0
  25. outputs/full_finetune_forwardmem_v2/eval_metrics.jsonl +4 -0
  26. outputs/qwen_ +121 -0
  27. outputs/qwen_babilong_no_memory/eval_metrics.jsonl +4 -0
  28. outputs/qwen_baseline_babilong_v4_ga2_4gpu/eval_metrics.jsonl +3 -0
  29. outputs/qwen_baseline_eval/baseline_results_eval.json +16 -0
  30. outputs/qwen_titans_babilong/eval_metrics.jsonl +31 -0
  31. outputs/qwen_titans_babilong_detach_4gpu_bs1_ckpt/eval_metrics.jsonl +1 -0
  32. outputs/qwen_titans_babilong_v3/eval_metrics.jsonl +5 -0
  33. pyproject.toml +73 -0
  34. tests/test_titans.py +427 -0
  35. titans_pytorch/__init__.py +15 -15
  36. titans_pytorch/implicit_mlp_attention.py +161 -0
  37. titans_pytorch/mac_transformer.py +921 -0
  38. titans_pytorch/nested_attention.py +123 -0
  39. train_implicit_mlp_attn.py +259 -0
  40. train_mac.py +200 -0
.github/workflows/python-publish.yml ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This workflow will upload a Python Package using Twine when a release is created
2
+ # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
3
+
4
+ # This workflow uses actions that are not certified by GitHub.
5
+ # They are provided by a third-party and are governed by
6
+ # separate terms of service, privacy policy, and support
7
+ # documentation.
8
+
9
+ name: Upload Python Package
10
+
11
+ on:
12
+ release:
13
+ types: [published]
14
+
15
+ jobs:
16
+ deploy:
17
+
18
+ runs-on: ubuntu-latest
19
+
20
+ steps:
21
+ - uses: actions/checkout@v2
22
+ - name: Set up Python
23
+ uses: actions/setup-python@v2
24
+ with:
25
+ python-version: '3.x'
26
+ - name: Install dependencies
27
+ run: |
28
+ python -m pip install --upgrade pip
29
+ pip install build
30
+ - name: Build package
31
+ run: python -m build
32
+ - name: Publish package
33
+ uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
34
+ with:
35
+ user: __token__
36
+ password: ${{ secrets.PYPI_API_TOKEN }}
.github/workflows/test.yaml ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Tests the examples in README
2
+ on: [push, pull_request]
3
+
4
+ env:
5
+ TYPECHECK: True
6
+
7
+ jobs:
8
+ test:
9
+ runs-on: ubuntu-latest
10
+ steps:
11
+ - uses: actions/checkout@v4
12
+ - name: Install Python
13
+ uses: actions/setup-python@v5
14
+ with:
15
+ python-version: "3.11"
16
+ - name: Install dependencies
17
+ run: |
18
+ python -m pip install uv
19
+ python -m uv pip install --upgrade pip
20
+ python -m uv pip install torch --index-url https://download.pytorch.org/whl/nightly/cpu
21
+ python -m uv pip install -e .[test]
22
+ - name: Test with pytest
23
+ run: |
24
+ python -m pytest tests/
.gitignore CHANGED
@@ -1,6 +1,174 @@
 
 
 
 
1
  __pycache__/
2
  *.py[cod]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  .pytest_cache/
4
- .mypy_cache/
5
- .ruff_cache/
 
 
 
 
 
6
  *.log
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ train_local.py
2
+ .DS_Store
3
+
4
+ # Byte-compiled / optimized / DLL files
5
  __pycache__/
6
  *.py[cod]
7
+ *$py.class
8
+
9
+ # C extensions
10
+ *.so
11
+
12
+ # Distribution / packaging
13
+ .Python
14
+ build/
15
+ develop-eggs/
16
+ dist/
17
+ downloads/
18
+ eggs/
19
+ .eggs/
20
+ lib/
21
+ lib64/
22
+ parts/
23
+ sdist/
24
+ var/
25
+ wheels/
26
+ share/python-wheels/
27
+ *.egg-info/
28
+ .installed.cfg
29
+ *.egg
30
+ MANIFEST
31
+
32
+ # PyInstaller
33
+ # Usually these files are written by a python script from a template
34
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
35
+ *.manifest
36
+ *.spec
37
+
38
+ # Installer logs
39
+ pip-log.txt
40
+ pip-delete-this-directory.txt
41
+
42
+ # Unit test / coverage reports
43
+ htmlcov/
44
+ .tox/
45
+ .nox/
46
+ .coverage
47
+ .coverage.*
48
+ .cache
49
+ nosetests.xml
50
+ coverage.xml
51
+ *.cover
52
+ *.py,cover
53
+ .hypothesis/
54
  .pytest_cache/
55
+ cover/
56
+
57
+ # Translations
58
+ *.mo
59
+ *.pot
60
+
61
+ # Django stuff:
62
  *.log
63
+ local_settings.py
64
+ db.sqlite3
65
+ db.sqlite3-journal
66
+
67
+ # Flask stuff:
68
+ instance/
69
+ .webassets-cache
70
+
71
+ # Scrapy stuff:
72
+ .scrapy
73
+
74
+ # Sphinx documentation
75
+ docs/_build/
76
+
77
+ # PyBuilder
78
+ .pybuilder/
79
+ target/
80
+
81
+ # Jupyter Notebook
82
+ .ipynb_checkpoints
83
+
84
+ # IPython
85
+ profile_default/
86
+ ipython_config.py
87
+
88
+ # pyenv
89
+ # For a library or package, you might want to ignore these files since the code is
90
+ # intended to run in multiple environments; otherwise, check them in:
91
+ # .python-version
92
+
93
+ # pipenv
94
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
95
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
96
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
97
+ # install all needed dependencies.
98
+ #Pipfile.lock
99
+
100
+ # UV
101
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
102
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
103
+ # commonly ignored for libraries.
104
+ #uv.lock
105
+
106
+ # poetry
107
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
108
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
109
+ # commonly ignored for libraries.
110
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
111
+ #poetry.lock
112
+
113
+ # pdm
114
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
115
+ #pdm.lock
116
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
117
+ # in version control.
118
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
119
+ .pdm.toml
120
+ .pdm-python
121
+ .pdm-build/
122
+
123
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
124
+ __pypackages__/
125
+
126
+ # Celery stuff
127
+ celerybeat-schedule
128
+ celerybeat.pid
129
+
130
+ # SageMath parsed files
131
+ *.sage.py
132
+
133
+ # Environments
134
+ .env
135
+ .venv
136
+ env/
137
+ venv/
138
+ ENV/
139
+ env.bak/
140
+ venv.bak/
141
+
142
+ # Spyder project settings
143
+ .spyderproject
144
+ .spyproject
145
+
146
+ # Rope project settings
147
+ .ropeproject
148
+
149
+ # mkdocs documentation
150
+ /site
151
+
152
+ # mypy
153
+ .mypy_cache/
154
+ .dmypy.json
155
+ dmypy.json
156
+
157
+ # Pyre type checker
158
+ .pyre/
159
+
160
+ # pytype static type analyzer
161
+ .pytype/
162
+
163
+ # Cython debug symbols
164
+ cython_debug/
165
+
166
+ # PyCharm
167
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
168
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
169
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
170
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
171
+ #.idea/
172
+
173
+ # PyPI configuration file
174
+ .pypirc
README.md CHANGED
@@ -1,98 +1,187 @@
1
- ---
2
- license: mit
3
- ---
4
 
5
- ## 这里是什么
6
 
7
- 这是一个**最小代码快照**:只包含运行 `examples/train_qwen_titans_babilong_v4.py`(Qwen3 + Titans v4,BABILong QA1 32k,跨 chunk 梯度)所需的仓库内代码文件。
8
 
9
- - **不包含**:Qwen 权重、BABILong 数据集文件、以及原项目中 v4 未使用的其它模块
10
- - **用途**:方便复现实验/对照、归档 v4 代码与配置要点
11
 
12
- ---
13
 
14
- ## 代码清单(仅 v4 用到的仓库内代码)
15
 
16
- - `examples/train_qwen_titans_babilong_v4.py`
17
- - `titans_pytorch/neural_memory.py`
18
- - `titans_pytorch/memory_models.py`
19
- - `titans_pytorch/__init__.py`(本仓库内的最小导出,避免引入 v4 未使用模块)
20
- - `LICENSE`(上游 `titans-pytorch` 的 MIT License)
21
 
22
- ---
23
 
24
- ## 权重目录与数据集目录(v4 默认配置)
25
 
26
 - `examples/train_qwen_titans_babilong_v4.py` 的 `TrainingConfig` 里默认写死了本地路径(需要你按机器环境修改):
27
-
28
- - **Qwen3 权重目录(HF snapshot)**:
29
- - `model_path`:`/data/huangyifei/huggingface_cache/hub/models--Qwen--Qwen3-4B-Instruct-2507/snapshots/cdbee75f17c01a7cc42f958dc650907174af0554`
30
- - **BABILong QA1 32k 数据 JSON**:
31
- - `data_path`:`/data/yty/BABILong/babilong-train-5k-samples/data/qa1/32k.json`
32
 
33
- 说明:
34
- - 这两个路径**不会被上传到本仓库**,这里只做“路径与配置说明”。
35
- - v4 脚本当前**没有**提供 `--model_path/--data_path` 命令行参数;如需改路径,请直接改 `TrainingConfig` 里的默认值。
36
 
37
- ---
 
 
38
 
39
- ## 程序特性(v4 重点)
 
 
 
40
 
41
- v4 的目标是:在 32k 长序列 chunk streaming 的训练中,尽可能实现**跨 chunk 的梯度流动**,并在显存可控的前提下训练“记忆模块 + 少量底座参数”。
 
42
 
43
- - **跨 chunk 梯度(核心)**
44
- - `chunkwise_backward=False`:整段序列(32k)一起反传(而不是每个 chunk 独立反传)
45
- - `detach_mem_state=False`:记忆 state 不 detach,使梯度图能跨 chunk 连接
46
- - `cross_chunk_gradient_steps`:限制梯度回传穿过多少个历史 chunk(控制显存/稳定性)
47
 
48
- - **冻结策略(v4 header 描述)**
49
- - 冻结 Qwen backbone 的大部分参数
50
- - **保留可训练**:`embed_tokens`(输入适配)、`lm_head`(必要时解开 tied weights)、以及 Titans 记忆模块相关参数
51
 
52
- - **学习率分组(v4:更细粒度)**
53
- - `lr_memory` / `lr_memory_attention`:记忆模块(含 deep integration 相关)
54
- - `lr_embed`:`embed_tokens`
55
- - `lr_lm_head`:`lm_head`
56
 
57
- - **稳定性与兼容性**
58
- - 脚本开头主动禁用/模拟 `torchao`,避免 `transformers` 导入时的版本冲突
59
- - 建议开启 `gradient_checkpointing=True`(v4 默认开启),缓解 32k full backward 的显存压力
60
- - 支持 DDP/FSDP(FSDP auto-wrap `Qwen3DecoderLayer` 与 v4 自定义层)
 
 
 
 
61
 
62
- - **评估与保存**
63
- - 评估指标为 **answer-only**(只在 `labels != -100` 的答案 token 上计算 loss/acc)
64
- - 输出:
65
- - `eval_metrics.jsonl`
66
- - `final_memory_checkpoint.pt`(仅保存 requires_grad 且属于记忆/门控/embed/head 的参数)
67
- - `final_full_checkpoint.pt`(可选保存完整 state_dict)
68
 
69
- ---
 
70
 
71
- ## 运行方式(示例)
72
 
73
- 以下命令仅作参考;请先在脚本中把 `model_path/data_path` 改成你机器上的真实路径。
 
74
 
75
- - **单机多卡(FSDP)训练**:
76
 
77
  ```bash
78
- torchrun --standalone --nproc_per_node=4 examples/train_qwen_titans_babilong_v4.py --fsdp
79
  ```
80
 
81
- - **评估(eval_only)**:
82
 
83
  ```bash
84
- python examples/train_qwen_titans_babilong_v4.py --eval_only --ckpt_path ./outputs/qwen_titans_babilong_v4/final_memory_checkpoint.pt
85
  ```
86
 
87
- ---
88
 
89
- ## 依赖提示(非完整列表)
 
 
 
 
 
 
 
90
 
91
 - 该脚本依赖的关键 Python 包包括:`torch`、`transformers`、`einops`、`tqdm`、`tensordict`、`assoc-scan`、`einx` 等。
 
 
 
 
 
 
 
 
 
92
 
93
- ---
 
 
 
 
 
 
 
94
 
95
- ## 许可证与来源
 
 
 
 
 
 
 
96
 
97
- 本仓库内的 `titans_pytorch/*` 代码来自上游 `titans-pytorch`(MIT License),对应许可见 `LICENSE`。
 
 
 
 
 
 
 
 
 
98
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <img src="./fig2.png" width="400px"></img>
 
 
2
 
3
+ <img src="./fig1.png" width="400px"></img>
4
 
5
+ ## Titans - Pytorch
6
 
7
+ Unofficial implementation of [Titans](https://arxiv.org/abs/2501.00663) in Pytorch. Will also contain some explorations into architectures beyond their simple 1-4 layer MLP for the neural memory module, if it works well to any degree.
 
8
 
9
+ [Paper review by Yannic](https://www.youtube.com/watch?v=v67plFw1nMw)
10
 
11
+ [Quick Colab Run](https://colab.research.google.com/drive/11cGgSABykte3qbK-hjzPgLif3-9UUejm?usp=sharing)
12
 
13
+ ## Appreciation
 
 
 
 
14
 
15
+ - [Eryk](https://github.com/sentialx) for sharing his early experimental results with me, positive for 2 layer MLP
16
 
17
+ ## Install
18
 
19
+ ```bash
20
+ $ pip install titans-pytorch
21
+ ```
 
 
 
22
 
23
+ ## Usage
 
 
24
 
25
+ ```python
26
+ import torch
27
+ from titans_pytorch import NeuralMemory
28
 
29
+ mem = NeuralMemory(
30
+ dim = 384,
31
+ chunk_size = 64 # set to smaller chunk size for better perf on smaller sequence lengths (but more memory usage)
32
+ ).cuda()
33
 
34
+ seq = torch.randn(2, 1024, 384).cuda()
35
+ retrieved, mem_state = mem(seq)
36
 
37
+ assert seq.shape == retrieved.shape
38
+ ```
 
 
39
 
40
+ A transformer with the `MAC` configuration can be used as
 
 
41
 
42
+ ```python
43
+ import torch
44
+ from titans_pytorch import MemoryAsContextTransformer
 
45
 
46
+ transformer = MemoryAsContextTransformer(
47
+ num_tokens = 256,
48
+ dim = 256,
49
+ depth = 2,
50
+ segment_len = 128, # local attention window size
51
+ num_persist_mem_tokens = 4,
52
+ num_longterm_mem_tokens = 16,
53
+ )
54
 
55
+ token_ids = torch.randint(0, 256, (1, 1023))
 
 
 
 
 
56
 
57
+ loss = transformer(token_ids, return_loss = True) # (1, 1023, 256)
58
+ loss.backward()
59
 
60
+ # after much training
61
 
62
+ sampled = transformer.sample(token_ids[:, :4], 512)
63
+ ```
64
 
65
+ ## Experiments
66
 
67
  ```bash
68
+ $ pip install uv
69
  ```
70
 
71
+ Then modify `train_mac.py` and run it to query nature
72
 
73
  ```bash
74
+ $ uv run train_mac.py
75
  ```
76
 
77
+ ## Citations
78
 
79
+ ```bibtex
80
+ @inproceedings{Behrouz2024TitansLT,
81
+ title = {Titans: Learning to Memorize at Test Time},
82
+ author = {Ali Behrouz and Peilin Zhong and Vahab S. Mirrokni},
83
+ year = {2024},
84
+ url = {https://api.semanticscholar.org/CorpusID:275212078}
85
+ }
86
+ ```
87
 
88
+ ```bibtex
89
+ @article{Sun2024LearningT,
90
+ title = {Learning to (Learn at Test Time): RNNs with Expressive Hidden States},
91
+ author = {Yu Sun and Xinhao Li and Karan Dalal and Jiarui Xu and Arjun Vikram and Genghan Zhang and Yann Dubois and Xinlei Chen and Xiaolong Wang and Oluwasanmi Koyejo and Tatsunori Hashimoto and Carlos Guestrin},
92
+ journal = {ArXiv},
93
+ year = {2024},
94
+ volume = {abs/2407.04620},
95
+ url = {https://api.semanticscholar.org/CorpusID:271039606}
96
+ }
97
+ ```
98
 
99
+ ```bibtex
100
+ @inproceedings{Yang2024GatedDN,
101
+ title = {Gated Delta Networks: Improving Mamba2 with Delta Rule},
102
+ author = {Songlin Yang and Jan Kautz and Ali Hatamizadeh},
103
+ year = {2024},
104
+ url = {https://api.semanticscholar.org/CorpusID:274598177}
105
+ }
106
+ ```
107
 
108
+ ```bibtex
109
+ @inproceedings{Nguyen2024TurningUT,
110
+ title = {Turning Up the Heat: Min-p Sampling for Creative and Coherent LLM Outputs},
111
+ author = {Minh Nguyen and Andrew Baker and Clement Neo and Allen Roush and Andreas Kirsch and Ravid Shwartz-Ziv},
112
+ year = {2024},
113
+ url = {https://api.semanticscholar.org/CorpusID:270870613}
114
+ }
115
+ ```
116
 
117
+ ```bibtex
118
+ @article{Zhu2024HyperConnections,
119
+ title = {Hyper-Connections},
120
+ author = {Defa Zhu and Hongzhi Huang and Zihao Huang and Yutao Zeng and Yunyao Mao and Banggu Wu and Qiyang Min and Xun Zhou},
121
+ journal = {ArXiv},
122
+ year = {2024},
123
+ volume = {abs/2409.19606},
124
+ url = {https://api.semanticscholar.org/CorpusID:272987528}
125
+ }
126
+ ```
127
 
128
+ ```bibtex
129
+ @article{Zhou2024ValueRL,
130
+ title = {Value Residual Learning For Alleviating Attention Concentration In Transformers},
131
+ author = {Zhanchao Zhou and Tianyi Wu and Zhiyun Jiang and Zhenzhong Lan},
132
+ journal = {ArXiv},
133
+ year = {2024},
134
+ volume = {abs/2410.17897},
135
+ url = {https://api.semanticscholar.org/CorpusID:273532030}
136
+ }
137
+ ```
138
+
139
+ ```bibtex
140
+ @software{Kyrylov_Accelerated_Scan_2024,
141
+ author = {Kyrylov, Volodymyr},
142
+ doi = {10.5281/zenodo.10600962},
143
+ title = {Accelerated Scan},
144
+ version = {0.1.2},
145
+ year = {2024}
146
+ }
147
+ ```
148
+
149
+ ```bibtex
150
+ @misc{wang2025testtimeregressionunifyingframework,
151
+ title = {Test-time regression: a unifying framework for designing sequence models with associative memory},
152
+ author = {Ke Alexander Wang and Jiaxin Shi and Emily B. Fox},
153
+ year = {2025},
154
+ eprint = {2501.12352},
155
+ archivePrefix = {arXiv},
156
+ primaryClass = {cs.LG},
157
+ url = {https://arxiv.org/abs/2501.12352},
158
+ }
159
+ ```
160
+
161
+ ```bibtex
162
+ @misc{jordan2024muon,
163
+ author = {Keller Jordan and Yuchen Jin and Vlado Boza and Jiacheng You and
164
+ Franz Cesista and Laker Newhouse and Jeremy Bernstein},
165
+ title = {Muon: An optimizer for hidden layers in neural networks},
166
+ year = {2024},
167
+ url = {https://kellerjordan.github.io/posts/muon/}
168
+ }
169
+ ```
170
+
171
+ ```bibtex
172
+ @inproceedings{Zhang2025TestTimeTD,
173
+ title = {Test-Time Training Done Right},
174
+ author = {Tianyuan Zhang and Sai Bi and Yicong Hong and Kai Zhang and Fujun Luan and Songlin Yang and Kalyan Sunkavalli and William T. Freeman and Hao Tan},
175
+ year = {2025},
176
+ url = {https://api.semanticscholar.org/CorpusID:279071244}
177
+ }
178
+ ```
179
+
180
+ ```bibtex
181
+ @inproceedings{Behrouz2025ATLASLT,
182
+ title = {ATLAS: Learning to Optimally Memorize the Context at Test Time},
183
+ author = {Ali Behrouz and Ze-Minghui Li and Praneeth Kacham and Majid Daliri and Yuan Deng and Peilin Zhong and Meisam Razaviyayn and Vahab S. Mirrokni},
184
+ year = {2025},
185
+ url = {https://api.semanticscholar.org/CorpusID:278996373}
186
+ }
187
+ ```
examples/BABILONG_DATA_PIPELINE.md ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## BABILong(QA1 / 32k)数据处理与训练数据流说明
2
+
3
+ 本文档描述当前仓库里 **BABILong QA1(32k.json)** 在训练脚本中的实际处理方式:从原始 JSON,到 tokenizer、padding、labels、DataLoader,再到喂给 `QwenTitansForBABILong` 的整条数据流。
4
+
5
+ 代码入口:
6
+
7
+ - `examples/train_qwen_titans_babilong.py`
8
+
9
+ ---
10
+
11
+ ## 数据源与样本格式
12
+
13
+ 默认数据路径(可改):
14
+
15
+ - `TrainingConfig.data_path = /data/yty/BABILong/babilong-train-5k-samples/data/qa1/32k.json`
16
+
17
+ 文件内容为一个 JSON 列表,每条样本大体包含:
18
+
19
+ - `input`:长上下文(故事/事实)
20
+ - `question`:问题
21
+ - `target`:答案(短文本)
22
+
23
+ 训练脚本会把它拼成 prompt:
24
+
25
+ ```
26
+ {input}
27
+
28
+ Question: {question}
29
+ Answer:
30
+ ```
31
+
32
+ 并把答案拼接为(答案前加空格):
33
+
34
+ ```
35
+ {target}
36
+ ```
37
+
38
+ ---
39
+
40
+ ## 关键目标:固定长度样本(FSDP/DDP 必须)
41
+
42
+ 当前实现 **强制每条样本输出固定长度 `config.max_length`(默认 32768)**,原因:
43
+
44
+ - 模型前向会按 `chunk_size` 把序列分 chunk 循环处理
45
+ - 在 FSDP/DDP 下,如果不同 rank 的序列长度不同 → chunk 数不同 → collectives 顺序不一致 → NCCL watchdog 超时
46
+
47
+ 因此数据侧必须固定长度,保证每步每个 rank 的 chunk 次数一致。
48
+
49
+ 相关参数:
50
+
51
+ - `TrainingConfig.max_length`:固定输出长度(默认 32768)
52
+ - `TrainingConfig.answer_reserve_tokens`:给答案预留 token 数(默认 64)
53
+
54
+ ---
55
+
56
+ ## Dataset:`BABILongDataset.__getitem__` 的处理流程
57
+
58
+ 位置:`examples/train_qwen_titans_babilong.py`
59
+
60
+ ### Step 1:tokenize prompt(截断)
61
+
62
+ - 对 prompt 进行 tokenize
63
+ - 最大长度限制为 `max_length - answer_reserve_tokens`
64
+ - `add_special_tokens=True`(让 tokenizer 自己加 BOS/EOS 等需要的特殊 token)
65
+
66
+ ### Step 2:tokenize answer(不加特殊 token)
67
+
68
+ - 对 `" {target}"` tokenize
69
+ - `add_special_tokens=False`
70
+
71
+ ### Step 3:拼接并截断到 `max_length`
72
+
73
+ - 先算 prompt token 数 `len(prompt_ids)`
74
+ - answer 只保留剩余可用空间:`available = max_length - len(prompt_ids)`
75
+ - `input_ids = concat(prompt_ids, answer_ids[:available])`
76
+
77
+ ### Step 4:构造 `labels`(只监督答案)
78
+
79
+ - `labels` 初始全为 `-100`
80
+ - 只有答案 token 的位置才写入对应 token id
81
+ - 这样 loss 只在答案 token 上计算(prompt 与 padding 不参与 loss)
82
+
83
+ ### Step 5:padding 到固定长度 + attention_mask
84
+
85
+ 如果拼接后长度 `< max_length`:
86
+
87
+ - `input_ids` 右侧 pad 到 `max_length`(pad_id = tokenizer.pad_token_id)
88
+ - `labels` pad 的部分保持 `-100`
89
+ - `attention_mask`:
90
+ - 真 token 为 1
91
+ - padding 为 0
92
+
93
+ > 备注:脚本在 `main()` 里如果发现 `tokenizer.pad_token is None`,会设置 `pad_token = eos_token`,确保有 pad_id。
94
+
95
+ ---
96
+
97
+ ## DataLoader 与分布式采样
98
+
99
+ ### DataLoader
100
+
101
+ - `batch_size = 1`(32k 序列 + chunk streaming,一般只能 1)
102
+ - `collate_fn` 只做 stack(Dataset 已固定长度,不做动态 padding)
103
+ - `num_workers = 0`(避免多进程复制大张量带来的额外开销/不稳定)
104
+
105
+ ### 训练/验证切分
106
+
107
+ - `random_split(full_dataset, [train_size, eval_size], generator=manual_seed(config.seed))`
108
+ - 默认 `train_ratio=0.9`
109
+
110
+ ### 分布式(torchrun)
111
+
112
+ 当使用 `torchrun` 启动时:
113
+
114
+ - 训练集:`DistributedSampler(..., shuffle=True, seed=config.seed)`
115
+ - 验证集:`DistributedSampler(..., shuffle=False)`
116
+ - 每个 epoch 会调用 `train_sampler.set_epoch(epoch)`,保证各 rank shuffle 一致
117
+
118
+ ---
119
+
120
+ ## 喂给模型的数据张量形状
121
+
122
+ 由于固定长度:
123
+
124
+ - `input_ids`: `[B, max_length]`(默认 `[1, 32768]`)
125
+ - `attention_mask`: `[B, max_length]`
126
+ - `labels`: `[B, max_length]`
127
+
128
+ 模型内部再按 `chunk_size`(默认 4096)切成 8 个 chunk 进行 streaming。
129
+
130
+ ---
131
+
132
+ ## 训练与日志(跟数据流相关的行为)
133
+
134
+ - **梯度累积**:`gradient_accumulation_steps=8`
135
+ - 每 8 个 micro-batch 才做一次 optimizer step
136
+ - **每 80 个 batch 输出一次**:
137
+ - `--log_every_batches 80`(默认 80)
138
+ - 会自动换算成 `logging_steps = ceil(log_every_batches / gradient_accumulation_steps)`
139
+ - 并在 rank0 额外 `logger.info(...)` 打一行到终端,方便 `tee` 保存
140
+
141
+ ---
142
+
143
+ ## 运行方式(推荐)
144
+
145
+ ### 8 卡 + FSDP
146
+
147
+ ```bash
148
+ CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
149
+ torchrun --standalone --nproc_per_node=8 \
150
+ examples/train_qwen_titans_babilong.py --fsdp --log_every_batches 80
151
+ ```
152
+
153
+ ### 快速小跑(2 卡调试)
154
+
155
+ ```bash
156
+ CUDA_VISIBLE_DEVICES=0,1 \
157
+ torchrun --standalone --nproc_per_node=2 \
158
+ examples/train_qwen_titans_babilong.py --fsdp --max_samples 8 --num_epochs 1 --eval_steps 1000000
159
+ ```
160
+
161
+ ---
162
+
163
+ ## 训练产物(输出)
164
+
165
+ 默认输出目录:
166
+
167
+ - `TrainingConfig.output_dir = ./outputs/qwen_titans_babilong`
168
+
169
+ 默认只保存一个 final checkpoint(覆盖写入):
170
+
171
+ - `final_memory_checkpoint.pt`
172
+
173
+ 内容包括:
174
+
175
+ - `memory_state_dict`:只包含 `long_term_memory` / `memory_gate` 的参数(体积更小)
176
+ - `global_step`
177
+
examples/QWEN3_TITANS_MEMORY_INTEGRATION.md ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Qwen3-4B-Instruct + Titans NeuralMemory(MAC 风格)集成流程说明
2
+
3
+ 本文档描述当前仓库里 **“Qwen3-4B-Instruct 作为 Core(短期处理器) + Titans NeuralMemory 作为长期记忆”** 的实际落地方式,代码入口为:
4
+
5
+ - `examples/train_qwen_titans_babilong.py`
6
+
7
+ ---
8
+
9
+ ## 整体思路(Streaming + MAC)
10
+
11
+ - **Core**:`Qwen3-4B-Instruct` 只负责处理一个可控窗口大小的 chunk(例如 4k)。
12
+ - **Long-term Memory**:`titans_pytorch.NeuralMemory` 维护一个随 chunk 滚动更新的 **memory_state**(快权重/动量等状态)。
13
+ - **MAC(Memory-as-Context)风格注入**:每个 chunk 开始时,从长期记忆 **读取一小段 memory tokens**,将它们 **作为“额外上下文 token”前缀** 拼到 chunk 前面,让 Qwen 在注意力里直接可用。
14
+
15
+ 这使得整体序列长度可以远大于 Qwen 的单次可承受长度:Qwen 看到的是「当前 chunk + 记忆 tokens」,而长期信息通过 Titans 的 state 跨 chunk 保留与检索。
16
+
17
+ ---
18
+
19
+ ## 关键代码组件
20
+
21
+ ### 1) `TitansLongTermMemory`:长期记忆模块封装
22
+
23
+ 位置:`examples/train_qwen_titans_babilong.py`
24
+
25
+ 内部包含:
26
+
27
+ - **`self.neural_memory: NeuralMemory`**
28
+ - `dim = hidden_size`
29
+ - `heads = config.memory_heads`
30
+ - `dim_head = config.memory_dim_head`
31
+ - `model = MemoryMLP(dim=dim_head, depth=config.memory_depth, ...)`
32
+ - 写入稳定性相关配置:`default_step_transform_max_lr / init_adaptive_step_bias / max_grad_norm / ...`
33
+ - **`self.memory_query_tokens: nn.Parameter`**
34
+ - 形状 `[1, 16, hidden_size]`
35
+ - 作为“从长期记忆里检索”的查询 token(可训练)
36
+ - **`self.memory_proj: nn.Sequential`**
37
+ - 将 `retrieve_memories()` 的输出再映射回适合注入 Qwen 的表征空间
38
+
39
+ #### `read()`:只读检索(不会写入)
40
+
41
+ 当前实现 **明确避免** 使用 `NeuralMemory.forward(queries)` 来读:
42
+
43
+ - `NeuralMemory.forward()` 默认会同时做 store + retrieve
44
+ - 如果用 queries 调 `forward()`,会把 query 也写进记忆,造成污染
45
+
46
+ 因此当前实现采用:
47
+
48
+ - 从 `memory_state.states[0]` 取出“最新快权重”作为 `weights`
49
+ - 调用 `self.neural_memory.retrieve_memories(queries, weights)` 完成纯检索
50
+
51
+ #### `write()`:写入更新(跨 chunk 不反传)
52
+
53
+ 写入时调用:
54
+
55
+ - `self.neural_memory(hidden_states, state=..., store_mask=..., detach_mem_state=True)`
56
+
57
+ 其中 `detach_mem_state=True` 的含义是:
58
+
59
+ - **记忆 state 的更新过程不参与跨 chunk 的反向传播**
60
+ - 优点:训练更稳、显存更省、避免长链条反传导致 NaN/爆显存
61
+ - 代价:loss 不会“端到端”优化写入更新过程(但仍可优化 query/proj 等参数)
62
+
63
+ ---
64
+
65
+ ### 2) `QwenTitansForBABILong`:Qwen + 记忆的主包装模型
66
+
67
+ 位置:`examples/train_qwen_titans_babilong.py`
68
+
69
+ 包含:
70
+
71
+ - `self.qwen`: `AutoModelForCausalLM` 加载的 Qwen3
72
+ - `self.long_term_memory`: `TitansLongTermMemory`
73
+ - `self.memory_gate`: 当前仅占位(参数会训练/保存,但 **forward 里未实际使用**,目前是“纯 memory tokens 前缀注入”路线)
74
+
75
+ 并做了关键的 device/dtype 对齐:
76
+
77
+ - Qwen 通常用 `bf16`
78
+ - 记忆模块可以强制用 `float32`(`config.memory_fp32=True`)以降低 NaN 风险
79
+
80
+ ---
81
+
82
+ ## 前向流程(单个样本,Streaming)
83
+
84
+ 入口:`QwenTitansForBABILong.forward(input_ids, attention_mask, labels)`
85
+
86
+ ### Step 0:输入形状与 chunk 划分
87
+
88
+ - 输入被固定为 `seq_len = config.max_length`(例如 32768)
89
+ - 用 `chunk_size`(例如 4096)切成多个 chunks(例如 8 个)
90
+ - 为了覆盖 chunk 边界的 next-token 预测,每个 chunk 会带 **1 个 overlap token**:
91
+ - `proc_start = max(0, start - 1)`
92
+ - 当前 chunk 实际处理 `[proc_start, end)`,但写入记忆只写 `[start, end)`(避免重复写 overlap)
93
+
94
+ ### Step 1:从长期记忆读取 memory tokens
95
+
96
+ - 初始 `memory_state = None`:第一段不会注入 memory tokens
97
+ - 从第二个 chunk 开始:
98
+ - `memory_tokens = long_term_memory.read(batch_size, memory_state, num_tokens=config.num_memory_tokens)`
99
+
100
+ ### Step 2:把 memory tokens 作为前缀注入 Qwen(MAC)
101
+
102
+ 在 `_process_chunk()` 中完成:
103
+
104
+ - 先拿到 token embeddings:`token_embeds = qwen.model.embed_tokens(chunk_ids)`
105
+ - 若存在 memory tokens:
106
+ - `nan_to_num + scale + clamp`(避免记忆扰动太大引发不稳定)
107
+ - `torch.cat([memory_tokens, token_embeds], dim=1)` 作为 `inputs_embeds`
108
+ - 同步扩展 `attention_mask`:为 memory token 补 1
109
+
110
+ ### Step 3:调用 `self.qwen.model(...)` 做 chunk 前向
111
+
112
+ 这里必须走 `Qwen3Model.forward()`(即 `self.qwen.model`),原因是:
113
+
114
+ - Qwen3Attention 依赖 `position_embeddings=(cos, sin)` 等由 `Qwen3Model` 内部生成并传入
115
+ - 不能直接逐层调用 `Qwen3DecoderLayer`(会出现 `cos, sin = position_embeddings` 的 None 问题)
116
+
117
+ 得到:
118
+
119
+ - `hidden_states = outputs.last_hidden_state`
120
 + - 如果注入了 memory tokens,则把它们从输出里切掉:`hidden_states = hidden_states[:, num_mem:]`
121
+
122
+ ### Step 4:把 chunk 的 hidden 写入 Titans 记忆
123
+
124
+ 写入采用:
125
+
126
+ - 去掉 overlap 的 `chunk_hidden`
127
+ - `store_mask` 用 `attention_mask`(padding=0 的位置不写入)
128
+ - 调用 `long_term_memory.write(mem_inp, state=memory_state, store_mask=...)`
129
+
130
+ 返回并更新:
131
+
132
+ - `memory_state = next_state`
133
+
134
+ ### Step 5:只在“答案 tokens”上计算 loss(省显存)
135
+
136
+ `labels` 中:
137
+
138
+ - prompt 与 padding 为 `-100`
139
+ - 仅答案 token 有监督标签
140
+
141
+ loss 计算策略:
142
+
143
+ - 只选择 `labels != -100` 的位置
144
+ - 对应 hidden 做 shift(next-token)
145
+ - 仅对有效位置做 `lm_head` 与交叉熵(避免全 vocab、全序列 logits 占用巨大显存)
146
+
147
+ ---
148
+
149
+ ## 分布式训练(FSDP)集成要点
150
+
151
+ 代码入口仍在:`examples/train_qwen_titans_babilong.py`
152
+
153
+ - **必须 `device_map=None`**:FSDP/DDP 不允许 HF 自动切分到多卡
154
+ - **强制 `attn_implementation="sdpa"`**:避免环境里 flash-attn/torchao/torchvision 的兼容性导入问题
155
+ - **FSDP wrap 策略**:
156
+ - 只 wrap `Qwen3DecoderLayer`
157
+ - `ignored_modules=[model.long_term_memory, model.memory_gate]`:记忆模块不参与分片(便于保存/调试,也避免小模块反复 allgather)
158
+ - **固定长度输入(非常关键)**:
159
+ - FSDP 需要所有 rank 每步进入 collectives 的顺序一致
160
+ - 如果不同 rank 的 chunk 次数不同,会出现 `_ALLGATHER_BASE` / `ALLREDUCE` 顺序错位,从而 NCCL watchdog 超时
161
+ - 因此数据侧必须 pad 到 `config.max_length`,保证每步 chunk 数一致
162
+
163
+ ---
164
+
165
+ ## 当前实现的取舍与可改进点
166
+
167
+ - **`memory_gate` 目前未参与 forward**:现在是纯“memory tokens 作为上下文前缀”的 MAC 注入;如果需要门控融合,需要在 forward 里显式引入 gate 计算与融合路径。
168
+ - **`detach_mem_state=True`**:训练更稳/省显存,但不会端到端训练“写入更新过程”;如果后续要做更强的端到端学习,需要重新评估这条策略(以及显存与稳定性代价)。
169
+
examples/QWEN_TITANS_BABILONG_TRAINING_FLOW_CN.txt ADDED
@@ -0,0 +1,272 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Qwen + Titans 在 BABILong(qa1/32k) 上的训练流程说明(基于 examples/train_qwen_titans_babilong.py)
2
+ ================================================================================
3
+
4
+ 目的
5
+ ----
6
+ 本脚本把 BABILong QA1 32k 数据做成“固定长度 32k”的监督样本,然后用 Titans 的 NeuralMemory 作为长期记忆,
7
+ 以 chunk(流式)方式处理超长序列:每个 chunk 先读记忆并注入 memory tokens,再跑 Qwen 得到 hidden_states,
8
+ 写回记忆,最后只对“答案 token”位置计算 loss,用分组学习率训练(记忆模块高 LR、Qwen 低 LR)。
9
+
10
+
11
+ 1. 数据到张量(Dataset / DataLoader)
12
+ ------------------------------------
13
+ 入口:BABILongDataset(data_path, tokenizer, max_length=32768, answer_reserve_tokens=64, max_samples=...)
14
+
15
+ 1) 读取数据
16
+ - data_path 指向 qa1/32k.json
17
+ - json.load() 读入一个 list,每条样本假设包含:
18
+ - input: 长上下文(context)
19
+ - question: 问题
20
+ - target: 答案(文本)
21
+
22
+ 2) 构造 prompt + answer
23
+ - prompt 文本格式:
24
+ "{input}\n\nQuestion: {question}\nAnswer:"
25
+ - answer 文本:f" {target}"(前导空格)
26
+
27
+ 3) Tokenize + 截断 + 拼接(重点:固定长度)
28
+ - prompt_ids = tokenizer(prompt, truncation=True, max_length=max_length - reserve, add_special_tokens=True)
29
+ 说明:reserve=answer_reserve_tokens,强制给答案预留 token 空间,避免答案被 prompt 截断挤掉。
30
+ - answer_ids = tokenizer(answer, add_special_tokens=False),并截断到剩余空间 available。
31
+ - input_ids = concat(prompt_ids, answer_ids),再截断到 max_length。
32
+
33
+ 4) labels(只监督答案 token)
34
+ - labels 初始全为 -100(忽略)
35
+ - 仅将 answer 对应区间(prompt 末尾开始)设置为真实 token id:
36
+ labels[start:end] = input_ids[start:end]
37
+ 所以 loss 只来自答案 token,prompt 与 padding 都不算。
38
+
39
+ 5) padding + attention_mask(固定 shape 的关键)
40
+ - 任何样本都会 pad 到 max_length(默认 32768):
41
+ - input_ids 用 pad_token_id 填充
42
+ - labels 用 -100 填充
43
+ - attention_mask:真实 token 为 1,padding 为 0
44
+
45
+ 6) collate_fn
46
+ - Dataset 已保证固定长度,因此 collate_fn 只做 stack:
47
+ {k: torch.stack([...], dim=0)}
48
+
49
+ 为什么必须固定长度?
50
+ - DDP/FSDP 下如果不同 rank 的序列长度不同,会导致 chunk 次数不同,
51
+ 从而 collective 调用顺序不一致,最终容易出现 NCCL timeout。
52
+
53
+
54
+ 2. Qwen 加载与基础设置(Transformers)
55
+ -------------------------------------
56
+ 入口:main() 中 AutoTokenizer / AutoModelForCausalLM
57
+
58
+ 1) tokenizer
59
+ - AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
60
+ - 若 tokenizer.pad_token 为空,则设置为 tokenizer.eos_token(便于 padding)
61
+
62
+ 2) 兼容性禁用(flash-attn / torchao / torchvision)
63
+ - 脚本显式把 transformers 的可用性检测函数改成返回 False,
64
+ 避免环境里“包存在但与 torch 不兼容”导致 import 崩溃。
65
+
66
+ 3) qwen_model
67
+ - AutoModelForCausalLM.from_pretrained(
68
+ model_path,
69
+ torch_dtype=bf16/fp16/fp32(默认 bf16),
70
+ device_map=None(DDP/FSDP 不允许切分),
71
+ trust_remote_code=True,
72
+ attn_implementation="sdpa"(强制走 PyTorch SDPA),
73
+ low_cpu_mem_usage=True
74
+ )
75
+ - qwen_model.to(device)
76
+ - qwen_model.config.use_cache=False
77
+ - gradient_checkpointing_enable()(省显存)
78
+
79
+ 备注:脚本标题写 Qwen3-4B,但流程对 Qwen3-3B 完全一致,只需替换 model_path。
80
+
81
+
82
+ 3. Titans 长期记忆模块(TitansLongTermMemory)
83
+ ---------------------------------------------
84
+ 类:TitansLongTermMemory(hidden_size, chunk_size=64, batch_size=128, dim_head=64, heads=..., memory_depth=2)
85
+
86
+ 内部组件:
87
+ 1) MemoryMLP(dim=dim_head, depth=memory_depth, expansion_factor=2.0)
88
+ 2) NeuralMemory(
89
+ dim=hidden_size,
90
+ chunk_size=memory_chunk_size,
91
+ batch_size=memory_batch_size,
92
+ dim_head=memory_dim_head,
93
+ heads=memory_heads,
94
+ model=MemoryMLP,
95
+ momentum=True,
96
+ qk_rmsnorm=True,
97
+ pre_rmsnorm=True,
98
+ default_step_transform_max_lr=1e-2,
99
+ init_adaptive_step_bias=-6.0,
100
+ max_grad_norm=1.0,
101
+ spectral_norm_surprises=True,
102
+ use_accelerated_scan=True
103
+ )
104
+ 3) memory_query_tokens:可学习查询 token(默认 16 个)
105
+ 4) memory_proj:LayerNorm + Linear + GELU + Linear(检索结果投影)
106
+
107
+ 写入(write):
108
+ - self.neural_memory(hidden_states, state=state, store_mask=..., detach_mem_state=True)
109
+ - detach_mem_state=True:跨 chunk 不反传(更稳、更省显存)
110
+ - store_mask:把 padding 的位置 mask 掉,不写入记忆
111
+
112
+ 读取(read):
113
+ - 不用 self.neural_memory(queries, ...) 的 forward(因为 forward 默认 store+retrieve,会把 query 写进记忆污染)
114
+ - 从 state.states[0] 取 last_update,构造 weights,调用:
115
+ self.neural_memory.retrieve_memories(queries, weights)
116
+ - 再过 memory_proj 得到 memory tokens(embedding 形态)
117
+
118
+
119
+ 4. 把 Titans “加到” Qwen:包装模型 QwenTitansForBABILong
120
+ ---------------------------------------------------------
121
+ 类:QwenTitansForBABILong(qwen_model, config)
122
+
123
+ 成员:
124
+ - self.qwen:Transformers CausalLM(Qwen)
125
+ - self.long_term_memory:TitansLongTermMemory
126
+ - self.memory_gate:Linear+Sigmoid(参数分组会包含它;当前 forward 里未显式用 gate 融合)
127
+
128
+ 设备与 dtype 对齐:
129
+ - 为避免 device mismatch(尤其是多卡/切分环境),脚本会把长期记忆模块放到与 Qwen 输出所在 device 一致。
130
+ - config.memory_fp32=True 时,记忆模块用 float32 计算;Qwen 保持 bf16/fp16。
131
+
132
+
133
+ 5. 32k 序列的流式前向:chunk 循环(核心)
134
+ ----------------------------------------
135
+ 入口:QwenTitansForBABILong.forward(input_ids, attention_mask, labels)
136
+
137
+ 关键配置:
138
+ - config.max_length = 32768(数据侧固定长度)
139
+ - config.chunk_size = 4096(Qwen 前向每次只跑 4k token)
140
+ - config.num_memory_tokens = 16(每个 chunk 注入的记忆 token 数)
141
+
142
+ 步骤:
143
+ 1) 切 chunk
144
+ - 将 input_ids 按 chunk_size 4096 切分为多个片段 (start, end)
145
+ - 为覆盖 chunk 边界的 next-token loss,处理时会带 1 个 overlap token:
146
+ proc_start = max(0, start - 1)
147
+ chunk_ids = input_ids[:, proc_start:end]
148
+
149
+ 2) 逐 chunk 执行:读记忆 → 注入 → 跑 Qwen → 写回记忆 → 算 loss
150
+ 2.1 读记忆(read)
151
+ - 若 memory_state 非空:
152
+ memory_tokens = long_term_memory.read(batch_size, memory_state, num_tokens=16)
153
+
154
+ 2.2 注入 memory tokens,并跑 Qwen 得 hidden_states(_process_chunk)
155
+ - token_embeds = qwen.model.embed_tokens(chunk_ids)(或 get_input_embeddings)
156
+ - 若有 memory_tokens:
157
+ a) nan_to_num + scale + clamp(避免记忆输出过大扰动导致 NaN)
158
+ - scale = config.memory_token_scale(默认 0.05)
159
+ - clip = config.memory_token_clip(默认 2.0)
160
+ b) 拼到 embedding 前面:
161
+ inputs_embeds = cat([memory_tokens, token_embeds], dim=1)
162
+ attention_mask 也在前面补 1
163
+ - 必须走 qwen.model.forward(inputs_embeds=..., attention_mask=...)
164
+ (不能直接逐层调用 decoder layer,因为 Qwen3 attention 依赖 position_embeddings(cos/sin))
165
+ - outputs.last_hidden_state 得到 hidden_states
166
+ - 去掉前缀 memory token 对应的 hidden,只保留真实 token 对应 hidden
167
+
168
+ 2.3 写回记忆(write)
169
+ - 只写原始 [start, end) 对应 hidden,避免 overlap 的 token 重复写入:
170
+ start>0 时跳过 hidden_full 的第一个位置
171
+ - store_mask 同样对齐并屏蔽 padding
172
+ - memory_fp32=True 时写入前转 float32
173
+ - detach_mem_state=True:跨 chunk 不反传
174
+
175
+ 2.4 计算 loss(只在答案 token,且只算必要 logits)
176
+ - shift_hidden = hidden_full[:, :-1]
177
+ - shift_labels = labels[:, 1:]
178
+ - valid = (shift_labels != -100)
179
+ - 仅对 valid 位置 flatten:
180
+ hs = shift_hidden[valid]
181
+ targets = shift_labels[valid]
182
+ - logits = qwen.lm_head(hs)(只算这部分 vocab logits,避免为整段 32k 建巨型 logits 张量)
183
+ - CrossEntropyLoss(reduction="sum") 求和,再除以有效 token 数得到 token 平均 loss
184
+ - 脚本对 hs/logits 做 nan_to_num 兜底,降低单 chunk 非有限导致整步 NaN 的风险。
185
+
186
+ 输出:
187
+ - out["loss"]:答案 token 的 token 平均 loss
188
+ - 可选:teacher-forcing 的 pred_ids/target_ids(评估打印样例时使用)
189
+
190
+
191
+ 6. 训练器(Trainer):优化器、混合精度、梯度累积、分布式
192
+ ----------------------------------------------------------
193
+ 入口:Trainer(model, train_dataloader, eval_dataloader, config, ...)
194
+
195
+ 1) 参数分组学习率(关键)
196
+ - model.get_param_groups() 依据参数名分两组:
197
+ - 含 long_term_memory / memory_gate:lr = lr_memory(默认 1e-4)
198
+ - 其余(Qwen):lr = lr_pretrained(默认 5e-6)
199
+ - AdamW(param_groups)
200
+
201
+ 2) Scheduler
202
+ - CosineAnnealingLR(T_max=总 optimizer step 数, eta_min=1e-7)
203
+ - total_steps = ceil((len(dataloader)*epochs) / gradient_accumulation_steps)
204
+
205
+ 3) 混合精度
206
+ - 默认 bf16 autocast(torch.amp.autocast)
207
+ - 若 fp16 才启用 GradScaler;bf16 不用 scaler(通常更稳)
208
+
209
+ 4) 主训练循环(train)
210
+ - 每个 micro-batch:
211
+ a) autocast 前向,loss / gradient_accumulation_steps
212
+ b) 分布式一致性检查:
213
+ - 若任意 rank loss 非有限(nan/inf),all_reduce(MIN) 后所有 rank 一起 skip
214
+ - 目的:避免 DDP/FSDP 梯度同步死锁
215
+ c) 梯度累积:
216
+ - 非同步步用 model.no_sync()(减少通信)
217
+ - 同步步:
218
+ - clip_grad_norm
219
+ - optimizer.step
220
+ - scheduler.step
221
+ - zero_grad
222
+ - global_step++
223
+ d) 按 eval_steps 触发 evaluate()
224
+
225
+ 5) 评估(evaluate)
226
+ - 同样走模型 forward,但可选收集 pred_ids/target_ids
227
+ - 计算:
228
+ - answer-only loss
229
+ - token-level accuracy(答案 token)
230
+ - EM(样本级答案完全匹配,teacher-forcing decode 对比;需 tokenizer)
231
+
232
+
233
+ 7. DDP / FSDP 包裹与注意点
234
+ --------------------------
235
+ 1) init_distributed
236
+ - 通过 torchrun 的环境变量 RANK/WORLD_SIZE/LOCAL_RANK 初始化 NCCL
237
+ - torch.cuda.set_device(local_rank)
238
+
239
+ 2) DDP
240
+ - DDP(model, device_ids=[local_rank], output_device=local_rank)
241
+
242
+ 3) FSDP(可选)
243
+ - auto_wrap_policy 只 wrap Qwen3DecoderLayer(transformer 层)
244
+ - ignored_modules=[model.long_term_memory, model.memory_gate]
245
+ 让 Titans 记忆模块不被分片,便于稳定训练与单独保存/加载
246
+
247
+
248
+ 8. 保存与加载 checkpoint
249
+ -----------------------
250
+ 训练结束会保存两类 checkpoint(覆盖写入):
251
+ 1) memory-only(小文件)
252
+ - final_memory_checkpoint.pt
253
+ - 保存 long_term_memory 与 memory_gate 的参数 + global_step + config
254
+
255
+ 2) full(大文件,可选,默认开启)
256
+ - final_full_checkpoint.pt
257
+ - 保存完整 model_state_dict(包含 Qwen + Titans),避免只存 memory 导致 Qwen 微调更新丢失
258
+
259
+ eval-only 模式:
260
+ - --eval_only 时优先尝试加载 full checkpoint(若包含 model_state_dict)
261
+ - 无论是否 full,都会再按“只加载记忆模块参数”的方式加载 memory_state_dict(兼容 memory-only)
262
+ - 支持 --max_eval_samples 快速只评估前 N 条验证样本
263
+
264
+
265
+ 9. 一句话总结(从数据到训练)
266
+ ----------------------------
267
+ JSON 样本(input/question/target)
268
+ → 固定长度 32k 的 input_ids/attention_mask,labels 只标注答案 token
269
+ → 32k 按 4k chunk 循环:读 Titans 记忆→注入 16 个 memory tokens→跑 Qwen 得 hidden→写回记忆(跨 chunk detach)→只在答案 token 位置算 CE loss
270
+ → AdamW 分组学习率(记忆高 LR / Qwen 低 LR)+ bf16 autocast + 梯度累积 + DDP/FSDP
271
+ → 最终保存 memory-only 与可选的完整权重 checkpoint。
272
+
examples/TITANS_ANALYSIS_CN.md ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Titans-PyTorch 代码分析报告
2
+
3
+ ## 概述
4
+
5
+ Titans 是 Google 在 2024 年底提出的新型长期记忆机制,核心思想是**将神经网络的权重作为长期记忆**,通过**测试时训练(Test-Time Training, TTT)**来动态存储和检索信息。
6
+
7
+ ## 核心组件分析
8
+
9
+ ### 1. NeuralMemory (神经记忆模块)
10
+
11
+ ```
12
+ 位置: titans_pytorch/neural_memory.py
13
+ ```
14
+
15
+ 这是 Titans 的核心组件,主要特点:
16
+
17
+ #### 1.1 记忆存储机制
18
+ - **记忆载体**: 使用 MLP(或其他模型)的权重作为记忆存储
19
+ - **存储过程**: 通过计算梯度来"写入"记忆
20
+ ```python
21
+ # 损失函数:|M(k) - v|²
22
+ # 其中 M 是记忆网络,k 是 key,v 是 value
23
+ def default_loss_fn(pred, target):
24
+ return (pred - target).pow(2).mean(dim = -1)
25
+ ```
26
+
27
+ #### 1.2 记忆检索机制
28
+ - 将 query 输入到记忆网络,获取存储的信息
29
+ - 使用 `functional_call` 进行前向传播
30
+
31
+ #### 1.3 关键参数
32
+
33
+ | 参数 | 说明 | 建议值 |
34
+ |------|------|--------|
35
+ | `chunk_size` | 分块大小,控制记忆更新粒度 | 32-128 |
36
+ | `batch_size` | 批处理大小,控制权重更新频率 | 64-256 |
37
+ | `momentum` | 是否使用动量优化 | True |
38
+ | `momentum_order` | 动量阶数 | 1-2 |
39
+ | `dim_head` | 每个头的维度 | 64 |
40
+ | `heads` | 注意力头数 | 4-8 |
41
+
42
+ ### 2. MemoryMLP (记忆网络)
43
+
44
+ ```
45
+ 位置: titans_pytorch/memory_models.py
46
+ ```
47
+
48
+ Titans 提供了多种记忆网络架构:
49
+
50
+ | 模型 | 描述 | 适用场景 |
51
+ |------|------|----------|
52
+ | `MemoryMLP` | 基础 MLP,来自 TTT 论文 | 通用场景 |
53
+ | `MemorySwiGluMLP` | SwiGLU 激活的 MLP | 更强表达能力 |
54
+ | `FactorizedMemoryMLP` | 分解权重的 MLP | 降低参数量 |
55
+ | `MemoryAttention` | 使用注意力的记忆 | 复杂依赖关系 |
56
+ | `GatedResidualMemoryMLP` | 门控残差 MLP | 深层网络 |
57
+
58
+ ### 3. MemoryAsContextTransformer (MAC Transformer)
59
+
60
+ ```
61
+ 位置: titans_pytorch/mac_transformer.py
62
+ ```
63
+
64
+ 将 NeuralMemory 集成到 Transformer 中的完整实现:
65
+
66
+ #### 核心设计
67
+ 1. **Segment 分段**: 将长序列分成固定长度的 segment
68
+ 2. **Longterm Memory Tokens**: 在每个 segment 开头添加记忆 token
69
+ 3. **Persistent Memory**: 全局共享的持久记忆 token
70
+ 4. **记忆更新**: 通过 NeuralMemory 动态更新记忆
71
+
72
+ ## 将 Titans 集成到 Qwen 的三种方案
73
+
74
+ ### 方案 1: 外部包装器(最简单)
75
+
76
+ ```python
77
+ class TitansMemoryWrapper(nn.Module):
78
+ def __init__(self, qwen_model, ...):
79
+ self.qwen = qwen_model
80
+ self.neural_memory = NeuralMemory(...)
81
+
82
+ def forward(self, input_ids, memory_state=None):
83
+ # 1. 获取 Qwen 隐藏状态
84
+ hidden = self.qwen(..., output_hidden_states=True).hidden_states[-1]
85
+
86
+ # 2. 记忆增强
87
+ retrieved, next_state = self.neural_memory(hidden, state=memory_state)
88
+
89
+ # 3. 融合输出
90
+ enhanced = hidden + gate * retrieved
91
+ return enhanced, next_state
92
+ ```
93
+
94
+ **优点**: 不修改 Qwen 内部结构,易于实现
95
+ **缺点**: 记忆与模型的交互较浅
96
+
97
+ ### 方案 2: 层级集成(中等复杂度)
98
+
99
+ ```python
100
+ class QwenDecoderLayerWithMemory(nn.Module):
101
+ def forward(self, hidden_states, memory_state=None):
102
+ # 标准 attention
103
+ attn_output = self.self_attn(hidden_states)
104
+ hidden_states = residual + attn_output
105
+
106
+ # Titans 记忆增强
107
+ retrieved, next_state = self.neural_memory(hidden_states, state=memory_state)
108
+ hidden_states = hidden_states + gate * retrieved
109
+
110
+ # 标准 FFN
111
+ hidden_states = self.mlp(hidden_states)
112
+ return hidden_states, next_state
113
+ ```
114
+
115
+ **优点**: 记忆与模型深度集成
116
+ **缺点**: 需要修改模型结构
117
+
118
+ ### 方案 3: Memory-as-Context(最接近论文)
119
+
120
+ ```python
121
+ class QwenWithMAC(nn.Module):
122
+ def __init__(self, qwen_model, ...):
123
+ self.qwen = qwen_model
124
+ self.longterm_mem = nn.Parameter(...) # 长期记忆 token
125
+ self.persist_mem = nn.Parameter(...) # 持久记忆 token
126
+ self.neural_memories = nn.ModuleDict({
127
+ '2': NeuralMemory(...),
128
+ '4': NeuralMemory(...),
129
+ '6': NeuralMemory(...),
130
+ })
131
+ ```
132
+
133
+ **优点**: 最接近论文实现,效果最好
134
+ **缺点**: 实现复杂,需要处理位置编码
135
+
136
+ ## 关键实现细节
137
+
138
+ ### 1. 梯度计算
139
+
140
+ Titans 使用 `torch.func.vmap` 和 `torch.func.grad` 进行高效的批量梯度计算:
141
+
142
+ ```python
143
+ from torch.func import functional_call, vmap, grad
144
+
145
+ def forward_and_loss(params, inputs, loss_weights, target):
146
+ pred = functional_call(self.memory_model, params, inputs)
147
+ loss = (pred - target).pow(2).mean(dim=-1)
148
+ return (loss * loss_weights).sum(), loss
149
+
150
+ grad_fn = grad(forward_and_loss, has_aux=True)
151
+ self.per_sample_grad_fn = vmap(grad_fn, in_dims=(0, 0, 0, 0))
152
+ ```
153
+
154
+ ### 2. Associative Scan(关联扫描)
155
+
156
+ 用于高效计算动量和权重衰减:
157
+
158
+ ```python
159
+ from assoc_scan import AssocScan
160
+
161
+ self.assoc_scan = AssocScan(use_accelerated=True)
162
+ # 用于计算: x[t] = α[t] * x[t-1] + (1-α[t]) * input[t]
163
+ ```
164
+
165
+ ### 3. 自适应学习率
166
+
167
+ 每个 token 的学习率是动态学习的:
168
+
169
+ ```python
170
+ adaptive_lr = self.to_adaptive_step(seq)
171
+ adaptive_lr = adaptive_lr.sigmoid() * max_lr # 限制在 [0, max_lr]
172
+ ```
173
+
174
+ ## 性能优化建议
175
+
176
+ ### 1. chunk_size 选择
177
+ - 较小的 chunk_size(如 32): 更细粒度的记忆更新,但内存占用更大
178
+ - 较大的 chunk_size(如 128): 更高效,但记忆粒度较粗
179
+
180
+ ### 2. batch_size 选择
181
+ - 较小的 batch_size(如 64): 更频繁的权重更新,可能更好地捕捉细节
182
+ - 较大的 batch_size(如 256): 训练更稳定,但可能丢失细节
183
+
184
+ ### 3. 使用加速扫描
185
+ ```python
186
+ NeuralMemory(..., use_accelerated_scan=True)
187
+ ```
188
+
189
+ ### 4. 使用 FlexAttention(PyTorch 2.0+)
190
+ ```python
191
+ MemoryAsContextTransformer(..., use_flex_attn=True)
192
+ ```
193
+
194
+ ## 安装和依赖
195
+
196
+ ```bash
197
+ pip install titans-pytorch
198
+
199
+ # 完整依赖
200
+ pip install torch einops tensordict assoc-scan rotary-embedding-torch x-transformers hyper-connections axial-positional-embedding
201
+ ```
202
+
203
+ ## 参考文献
204
+
205
+ 1. [Titans: Learning to Memorize at Test Time](https://arxiv.org/abs/2501.00663) - Google, 2024
206
+ 2. [Learning to (Learn at Test Time): RNNs with Expressive Hidden States](https://arxiv.org/abs/2407.04620) - TTT Paper
207
+ 3. [Gated Delta Networks](https://arxiv.org/abs/2412.06464) - Momentum 机制参考
208
+
209
+ ## 示例代码位置
210
+
211
+ 详细的集成示例代码请参考:
212
+ `examples/qwen_with_titans_memory.py`
examples/eval_qwen_baseline.py ADDED
@@ -0,0 +1,515 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Qwen3-4B baseline evaluation on BABILong QA1 (32k).
3
+
4
+ This script evaluates the pretrained Qwen model WITHOUT any training,
5
+ using the same chunk-based streaming approach as the Titans training script.
6
+
7
+ Purpose: Establish a baseline to compare with Titans memory-augmented models.
8
+ """
9
+
10
+ import os
11
+ import json
12
+ import math
13
+ import argparse
14
+ import logging
15
+ from dataclasses import dataclass
16
+ from typing import Optional, Dict, List
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+ import torch.nn.functional as F
21
+ from torch.utils.data import Dataset, DataLoader
22
+ from tqdm import tqdm
23
+
24
+ logging.basicConfig(
25
+ level=logging.INFO,
26
+ format="%(asctime)s - %(levelname)s - %(message)s"
27
+ )
28
+ logger = logging.getLogger(__name__)
29
+
30
+
31
+ @dataclass
32
+ class EvalConfig:
33
+ # paths - same as training config
34
+ model_path: str = "/data/huangyifei/huggingface_cache/hub/models--Qwen--Qwen3-4B-Instruct-2507/snapshots/cdbee75f17c01a7cc42f958dc650907174af0554"
35
+ data_path: str = "/data/yty/BABILong/babilong-train-5k-samples/data/qa1/32k.json"
36
+ output_dir: str = "./outputs/qwen_baseline_eval"
37
+
38
+ # streaming settings - same as training
39
+ chunk_size: int = 8192
40
+ max_length: int = 32768
41
+ answer_reserve_tokens: int = 64
42
+
43
+ # evaluation
44
+ batch_size: int = 1 # use 1 for simplicity in baseline eval
45
+ max_samples: Optional[int] = 500 # same as training default
46
+ print_examples: int = 20
47
+
48
+ # precision
49
+ bf16: bool = True
50
+ fp16: bool = False
51
+ use_tf32: bool = True
52
+
53
+ seed: int = 42
54
+
55
+
56
+ class BABILongDataset(Dataset):
57
+ """Same dataset class as training script for consistency."""
58
+
59
+ def __init__(
60
+ self,
61
+ data_path: str,
62
+ tokenizer,
63
+ max_length: int = 32768,
64
+ answer_reserve_tokens: int = 64,
65
+ max_samples: Optional[int] = None,
66
+ ):
67
+ self.tokenizer = tokenizer
68
+ self.max_length = max_length
69
+ self.answer_reserve_tokens = answer_reserve_tokens
70
+
71
+ logger.info(f"Loading dataset: {data_path}")
72
+ with open(data_path, "r") as f:
73
+ self.data = json.load(f)
74
+
75
+ if max_samples:
76
+ self.data = self.data[:max_samples]
77
+
78
+ logger.info(f"Dataset size: {len(self.data)}")
79
+
80
+ def __len__(self):
81
+ return len(self.data)
82
+
83
+ def __getitem__(self, idx):
84
+ item = self.data[idx]
85
+ text = f"{item['input']}\n\nQuestion: {item['question']}\nAnswer:"
86
+ target = item["target"]
87
+
88
+ pad_id = self.tokenizer.pad_token_id or 0
89
+ reserve = int(self.answer_reserve_tokens)
90
+
91
+ prompt_ids = self.tokenizer(
92
+ text,
93
+ max_length=max(self.max_length - reserve, 1),
94
+ truncation=True,
95
+ add_special_tokens=True,
96
+ return_tensors="pt",
97
+ ).input_ids.squeeze(0)
98
+
99
+ answer_ids = self.tokenizer(
100
+ f" {target}",
101
+ add_special_tokens=False,
102
+ return_tensors="pt",
103
+ ).input_ids.squeeze(0)
104
+
105
+ available = max(self.max_length - prompt_ids.numel(), 0)
106
+ answer_ids = answer_ids[:available]
107
+
108
+ input_ids = torch.cat([prompt_ids, answer_ids], dim=0)[: self.max_length]
109
+
110
+ labels = torch.full_like(input_ids, fill_value=-100)
111
+ if answer_ids.numel() > 0:
112
+ start = prompt_ids.numel()
113
+ end = min(start + answer_ids.numel(), labels.numel())
114
+ labels[start:end] = input_ids[start:end]
115
+
116
+ seq_len = input_ids.numel()
117
+ if seq_len < self.max_length:
118
+ pad_len = self.max_length - seq_len
119
+ input_ids = F.pad(input_ids, (0, pad_len), value=int(pad_id))
120
+ labels = F.pad(labels, (0, pad_len), value=-100)
121
+ attention_mask = torch.cat(
122
+ [torch.ones(seq_len, dtype=torch.long), torch.zeros(pad_len, dtype=torch.long)],
123
+ dim=0,
124
+ )
125
+ else:
126
+ attention_mask = torch.ones(self.max_length, dtype=torch.long)
127
+
128
+ return {
129
+ "input_ids": input_ids.to(dtype=torch.long),
130
+ "labels": labels.to(dtype=torch.long),
131
+ "attention_mask": attention_mask,
132
+ "target_text": target, # keep original target for comparison
133
+ }
134
+
135
+
136
+ def collate_fn(batch):
137
+ # separate target_text from tensor fields
138
+ target_texts = [b.pop("target_text") for b in batch]
139
+ tensor_batch = {k: torch.stack([b[k] for b in batch], dim=0) for k in batch[0].keys()}
140
+ tensor_batch["target_texts"] = target_texts
141
+ return tensor_batch
142
+
143
+
144
+ class QwenChunkwiseEvaluator:
145
+ """
146
+ Evaluates Qwen model using chunk-wise streaming (same as training).
147
+
148
+ Key difference from training: NO memory module, just pure Qwen forward pass.
149
+ Each chunk is processed independently with KV cache reset between samples.
150
+ """
151
+
152
+ def __init__(self, model, tokenizer, config: EvalConfig, device: torch.device):
153
+ self.model = model
154
+ self.tokenizer = tokenizer
155
+ self.config = config
156
+ self.device = device
157
+ self.hidden_size = model.config.hidden_size
158
+
159
+ def _split_into_chunks(self, seq_len: int, chunk_size: int):
160
+ """Split sequence into chunks, same as training."""
161
+ chunks = []
162
+ for start in range(0, seq_len, chunk_size):
163
+ end = min(start + chunk_size, seq_len)
164
+ chunks.append((start, end))
165
+ return chunks
166
+
167
+ @torch.no_grad()
168
+ def evaluate_sample(
169
+ self,
170
+ input_ids: torch.Tensor,
171
+ attention_mask: torch.Tensor,
172
+ labels: torch.Tensor,
173
+ ) -> Dict:
174
+ """
175
+ Evaluate a single sample using chunk-wise streaming.
176
+
177
+ Process:
178
+ 1. Split input into chunks
179
+ 2. Process each chunk through Qwen (with overlap for next-token prediction)
180
+ 3. Collect predictions only for answer tokens (labels != -100)
181
+ 4. Compute loss, token accuracy, and EM accuracy
182
+ """
183
+ batch_size, seq_len = input_ids.shape
184
+ chunk_size = self.config.chunk_size
185
+ chunks = self._split_into_chunks(seq_len, chunk_size)
186
+
187
+ loss_fct_sum = nn.CrossEntropyLoss(reduction="sum")
188
+ total_loss_sum = 0.0
189
+ total_loss_tokens = 0
190
+
191
+ pred_tokens: List[int] = []
192
+ target_tokens: List[int] = []
193
+
194
+ for start, end in chunks:
195
+ # Include 1 overlap token for next-token prediction at chunk boundaries
196
+ proc_start = max(0, start - 1)
197
+ chunk_ids = input_ids[:, proc_start:end]
198
+ chunk_labels = labels[:, proc_start:end]
199
+ chunk_mask = attention_mask[:, proc_start:end]
200
+
201
+ # Forward pass through Qwen
202
+ outputs = self.model(
203
+ input_ids=chunk_ids,
204
+ attention_mask=chunk_mask,
205
+ use_cache=False,
206
+ output_hidden_states=False,
207
+ return_dict=True,
208
+ )
209
+ logits = outputs.logits # [batch, seq, vocab]
210
+
211
+ # Compute loss and predictions for answer tokens
212
+ if chunk_labels is not None and (chunk_labels != -100).any():
213
+ # Shift for next-token prediction
214
+ shift_logits = logits[:, :-1, :].contiguous()
215
+ shift_labels = chunk_labels[:, 1:].contiguous()
216
+
217
+ valid = shift_labels != -100
218
+ if valid.any():
219
+ valid_logits = shift_logits[valid]
220
+ valid_targets = shift_labels[valid]
221
+
222
+ # Compute loss
223
+ chunk_loss = loss_fct_sum(valid_logits.float(), valid_targets)
224
+ total_loss_sum += chunk_loss.item()
225
+ total_loss_tokens += valid_targets.numel()
226
+
227
+ # Collect predictions
228
+ pred_ids = torch.argmax(valid_logits, dim=-1)
229
+ pred_tokens.extend(pred_ids.cpu().tolist())
230
+ target_tokens.extend(valid_targets.cpu().tolist())
231
+
232
+ # Compute metrics
233
+ if total_loss_tokens > 0:
234
+ avg_loss = total_loss_sum / total_loss_tokens
235
+ else:
236
+ avg_loss = 0.0
237
+
238
+ # Token accuracy
239
+ if len(pred_tokens) > 0:
240
+ tok_correct = sum(p == t for p, t in zip(pred_tokens, target_tokens))
241
+ tok_acc = tok_correct / len(pred_tokens)
242
+ else:
243
+ tok_acc = 0.0
244
+
245
+ # EM accuracy (exact match of decoded strings)
246
+ if len(pred_tokens) > 0:
247
+ pred_text = self.tokenizer.decode(pred_tokens, skip_special_tokens=True).strip()
248
+ target_text = self.tokenizer.decode(target_tokens, skip_special_tokens=True).strip()
249
+ em_match = (pred_text == target_text)
250
+ else:
251
+ pred_text = ""
252
+ target_text = ""
253
+ em_match = False
254
+
255
+ return {
256
+ "loss": avg_loss,
257
+ "tok_acc": tok_acc,
258
+ "em_match": em_match,
259
+ "pred_text": pred_text,
260
+ "target_text": target_text,
261
+ "num_tokens": len(pred_tokens),
262
+ }
263
+
264
+ @torch.no_grad()
265
+ def evaluate_dataset(self, dataloader: DataLoader, print_examples: int = 10) -> Dict:
266
+ """Evaluate entire dataset."""
267
+ self.model.eval()
268
+
269
+ total_loss = 0.0
270
+ total_batches = 0
271
+ total_tok_correct = 0
272
+ total_tok_total = 0
273
+ total_em_correct = 0
274
+ total_em_total = 0
275
+ printed = 0
276
+
277
+ pbar = tqdm(dataloader, desc="Evaluating", dynamic_ncols=True)
278
+ for batch in pbar:
279
+ input_ids = batch["input_ids"].to(self.device)
280
+ attention_mask = batch["attention_mask"].to(self.device)
281
+ labels = batch["labels"].to(self.device)
282
+ target_texts = batch["target_texts"]
283
+
284
+ # Process each sample in batch
285
+ for i in range(input_ids.shape[0]):
286
+ result = self.evaluate_sample(
287
+ input_ids[i:i+1],
288
+ attention_mask[i:i+1],
289
+ labels[i:i+1],
290
+ )
291
+
292
+ if result["num_tokens"] > 0:
293
+ total_loss += result["loss"]
294
+ total_batches += 1
295
+ total_tok_correct += int(result["tok_acc"] * result["num_tokens"])
296
+ total_tok_total += result["num_tokens"]
297
+ total_em_correct += int(result["em_match"])
298
+ total_em_total += 1
299
+
300
+ # Print examples
301
+ if printed < print_examples:
302
+ logger.info(
303
+ f"[EVAL SAMPLE {printed + 1}] "
304
+ f"pred={repr(result['pred_text'])} | "
305
+ f"label={repr(result['target_text'])} | "
306
+ f"match={result['em_match']}"
307
+ )
308
+ printed += 1
309
+
310
+ # Update progress bar
311
+ if total_em_total > 0:
312
+ pbar.set_postfix({
313
+ "em_acc": f"{total_em_correct / total_em_total * 100:.1f}%",
314
+ "tok_acc": f"{total_tok_correct / max(total_tok_total, 1) * 100:.1f}%",
315
+ })
316
+
317
+ # Compute final metrics
318
+ avg_loss = total_loss / max(total_batches, 1)
319
+ tok_acc = total_tok_correct / max(total_tok_total, 1)
320
+ em_acc = total_em_correct / max(total_em_total, 1)
321
+
322
+ return {
323
+ "loss": avg_loss,
324
+ "tok_acc": tok_acc,
325
+ "em_acc": em_acc,
326
+ "total_samples": total_em_total,
327
+ "total_tokens": total_tok_total,
328
+ }
329
+
330
+
331
+ def main():
332
+ from transformers import AutoModelForCausalLM, AutoTokenizer
333
+
334
+ parser = argparse.ArgumentParser(description="Evaluate Qwen baseline on BABILong")
335
+ parser.add_argument("--model_path", type=str, default=None, help="Path to Qwen model")
336
+ parser.add_argument("--data_path", type=str, default=None, help="Path to BABILong data")
337
+ parser.add_argument("--output_dir", type=str, default=None, help="Output directory")
338
+ parser.add_argument("--max_samples", type=int, default=None, help="Max samples to evaluate")
339
+ parser.add_argument("--chunk_size", type=int, default=None, help="Chunk size for streaming")
340
+ parser.add_argument("--batch_size", type=int, default=1, help="Batch size")
341
+ parser.add_argument("--print_examples", type=int, default=20, help="Number of examples to print")
342
+ parser.add_argument("--eval_split", type=str, default="eval", choices=["train", "eval", "all"],
343
+ help="Which split to evaluate: train (90%), eval (10%), or all")
344
+ args = parser.parse_args()
345
+
346
+ config = EvalConfig()
347
+ if args.model_path:
348
+ config.model_path = args.model_path
349
+ if args.data_path:
350
+ config.data_path = args.data_path
351
+ if args.output_dir:
352
+ config.output_dir = args.output_dir
353
+ if args.max_samples is not None:
354
+ config.max_samples = args.max_samples
355
+ if args.chunk_size is not None:
356
+ config.chunk_size = args.chunk_size
357
+ if args.batch_size:
358
+ config.batch_size = args.batch_size
359
+ if args.print_examples is not None:
360
+ config.print_examples = args.print_examples
361
+
362
+ torch.manual_seed(config.seed)
363
+
364
+ # Device setup
365
+ if torch.cuda.is_available():
366
+ device = torch.device("cuda")
367
+ else:
368
+ device = torch.device("cpu")
369
+
370
+ # TF32 settings
371
+ if torch.cuda.is_available() and config.use_tf32:
372
+ torch.backends.cuda.matmul.allow_tf32 = True
373
+ torch.backends.cudnn.allow_tf32 = True
374
+ try:
375
+ torch.set_float32_matmul_precision("high")
376
+ except Exception:
377
+ pass
378
+
379
+ logger.info("=" * 60)
380
+ logger.info("Qwen3-4B Baseline Evaluation (NO TRAINING)")
381
+ logger.info("=" * 60)
382
+ logger.info(f"model_path: {config.model_path}")
383
+ logger.info(f"data_path: {config.data_path}")
384
+ logger.info(f"output_dir: {config.output_dir}")
385
+ logger.info(f"max_samples: {config.max_samples}")
386
+ logger.info(f"max_length: {config.max_length}")
387
+ logger.info(f"chunk_size: {config.chunk_size}")
388
+ logger.info(f"eval_split: {args.eval_split}")
389
+ logger.info("=" * 60)
390
+
391
+ # Load tokenizer
392
+ logger.info("Loading tokenizer...")
393
+ tokenizer = AutoTokenizer.from_pretrained(config.model_path, trust_remote_code=True)
394
+ if tokenizer.pad_token is None:
395
+ tokenizer.pad_token = tokenizer.eos_token
396
+
397
+ # Disable flash-attn checks
398
+ try:
399
+ import transformers
400
+ from transformers.utils import import_utils as _import_utils
401
+
402
+ def _disabled(*args, **kwargs):
403
+ return False
404
+
405
+ _import_utils.is_flash_attn_2_available = _disabled
406
+ if hasattr(transformers, "utils") and hasattr(transformers.utils, "is_flash_attn_2_available"):
407
+ transformers.utils.is_flash_attn_2_available = _disabled
408
+ _import_utils.is_torchao_available = _disabled
409
+ if hasattr(transformers, "utils") and hasattr(transformers.utils, "is_torchao_available"):
410
+ transformers.utils.is_torchao_available = _disabled
411
+ except Exception as e:
412
+ logger.warning(f"Disable checks failed (ignored): {e}")
413
+
414
+ # Load model
415
+ logger.info("Loading model...")
416
+ torch_dtype = torch.bfloat16 if config.bf16 else (torch.float16 if config.fp16 else torch.float32)
417
+ model = AutoModelForCausalLM.from_pretrained(
418
+ config.model_path,
419
+ torch_dtype=torch_dtype,
420
+ device_map=None,
421
+ trust_remote_code=True,
422
+ attn_implementation="sdpa",
423
+ low_cpu_mem_usage=True,
424
+ )
425
+ model.to(device)
426
+ model.config.use_cache = False
427
+ model.eval()
428
+ logger.info(f"Model loaded: {model.config.hidden_size} hidden size, {model.config.num_hidden_layers} layers")
429
+
430
+ # Load dataset
431
+ logger.info("Loading dataset...")
432
+ full_dataset = BABILongDataset(
433
+ config.data_path,
434
+ tokenizer,
435
+ max_length=config.max_length,
436
+ answer_reserve_tokens=config.answer_reserve_tokens,
437
+ max_samples=config.max_samples,
438
+ )
439
+
440
+ # Split dataset same as training (90% train, 10% eval)
441
+ train_size = int(0.9 * len(full_dataset))
442
+ eval_size = len(full_dataset) - train_size
443
+ train_dataset, eval_dataset = torch.utils.data.random_split(
444
+ full_dataset,
445
+ [train_size, eval_size],
446
+ generator=torch.Generator().manual_seed(config.seed),
447
+ )
448
+
449
+ # Select which split to evaluate
450
+ if args.eval_split == "train":
451
+ dataset = train_dataset
452
+ split_name = "train"
453
+ elif args.eval_split == "eval":
454
+ dataset = eval_dataset
455
+ split_name = "eval"
456
+ else: # all
457
+ dataset = full_dataset
458
+ split_name = "all"
459
+
460
+ logger.info(f"Evaluating on '{split_name}' split: {len(dataset)} samples")
461
+
462
+ dataloader = DataLoader(
463
+ dataset,
464
+ batch_size=config.batch_size,
465
+ shuffle=False,
466
+ collate_fn=collate_fn,
467
+ num_workers=0,
468
+ )
469
+
470
+ # Create evaluator
471
+ evaluator = QwenChunkwiseEvaluator(model, tokenizer, config, device)
472
+
473
+ # Run evaluation
474
+ logger.info("Starting evaluation...")
475
+ results = evaluator.evaluate_dataset(dataloader, print_examples=config.print_examples)
476
+
477
+ # Print results
478
+ ppl = math.exp(min(20.0, results["loss"]))
479
+ logger.info("=" * 60)
480
+ logger.info("EVALUATION RESULTS (Qwen Baseline - NO TRAINING)")
481
+ logger.info("=" * 60)
482
+ logger.info(f"Split: {split_name}")
483
+ logger.info(f"Total samples: {results['total_samples']}")
484
+ logger.info(f"Total answer tokens: {results['total_tokens']}")
485
+ logger.info(f"Loss: {results['loss']:.4f}")
486
+ logger.info(f"Perplexity: {ppl:.3f}")
487
+ logger.info(f"Token Accuracy: {results['tok_acc'] * 100:.2f}%")
488
+ logger.info(f"EM Accuracy: {results['em_acc'] * 100:.2f}%")
489
+ logger.info("=" * 60)
490
+
491
+ # Save results
492
+ os.makedirs(config.output_dir, exist_ok=True)
493
+ results_path = os.path.join(config.output_dir, f"baseline_results_{split_name}.json")
494
+ with open(results_path, "w") as f:
495
+ json.dump({
496
+ "split": split_name,
497
+ "total_samples": int(results["total_samples"]),
498
+ "total_tokens": int(results["total_tokens"]),
499
+ "loss": float(results["loss"]),
500
+ "perplexity": float(ppl),
501
+ "tok_acc_pct": float(results["tok_acc"] * 100),
502
+ "em_acc_pct": float(results["em_acc"] * 100),
503
+ "config": {
504
+ "model_path": config.model_path,
505
+ "data_path": config.data_path,
506
+ "max_samples": config.max_samples,
507
+ "max_length": config.max_length,
508
+ "chunk_size": config.chunk_size,
509
+ }
510
+ }, f, indent=2)
511
+ logger.info(f"Results saved to: {results_path}")
512
+
513
+
514
+ if __name__ == "__main__":
515
+ main()
examples/outputs/qwen_titans_babilong_v4/eval_metrics.jsonl ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {"phase": "epoch", "epoch": 1, "global_step": 1, "train_avg_loss": 18.17474365234375, "eval_loss": 17.120962142944336, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
2
+ {"phase": "epoch", "epoch": 2, "global_step": 2, "train_avg_loss": 17.02557945251465, "eval_loss": 16.274072647094727, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
3
+ {"phase": "epoch", "epoch": 3, "global_step": 3, "train_avg_loss": 16.092069625854492, "eval_loss": 15.3228759765625, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
4
+ {"phase": "epoch", "epoch": 4, "global_step": 4, "train_avg_loss": 15.068357467651367, "eval_loss": 14.723411560058594, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
5
+ {"phase": "epoch", "epoch": 5, "global_step": 5, "train_avg_loss": 14.212869644165039, "eval_loss": 13.746140480041504, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
6
+ {"phase": "epoch", "epoch": 6, "global_step": 6, "train_avg_loss": 13.273895263671875, "eval_loss": 13.325453758239746, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
7
+ {"phase": "epoch", "epoch": 7, "global_step": 7, "train_avg_loss": 12.399141311645508, "eval_loss": 12.49329662322998, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
8
+ {"phase": "epoch", "epoch": 1, "global_step": 7, "train_avg_loss": 15.649271011352539, "eval_loss": 12.257902145385742, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
9
+ {"phase": "epoch", "epoch": 2, "global_step": 14, "train_avg_loss": 8.916175842285156, "eval_loss": 5.23334264755249, "em_acc_pct": 15.384615957736969, "tok_acc_pct": 15.384615957736969}
10
+ {"phase": "epoch", "epoch": 3, "global_step": 21, "train_avg_loss": 3.033076763153076, "eval_loss": 2.6507105827331543, "em_acc_pct": 28.846153616905212, "tok_acc_pct": 28.846153616905212}
11
+ {"phase": "epoch", "epoch": 4, "global_step": 28, "train_avg_loss": 1.8393750190734863, "eval_loss": 1.956390142440796, "em_acc_pct": 30.769231915473938, "tok_acc_pct": 30.769231915473938}
12
+ {"phase": "epoch", "epoch": 5, "global_step": 35, "train_avg_loss": 1.6820091009140015, "eval_loss": 1.9739787578582764, "em_acc_pct": 28.846153616905212, "tok_acc_pct": 28.846153616905212}
examples/qwen_mac_integration.py ADDED
@@ -0,0 +1,692 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ MAC (Memory-as-Context) 结构集成到 Qwen 的详细实现
3
+
4
+ === MAC 结构核心原理 ===
5
+
6
+ 1. 将长序列分成多个 segment(如每 128 个 token 一个 segment)
7
+ 2. 在每个 segment 的【开头】插入 longterm_mem_tokens(如 16 个)
8
+ 3. 这些 memory tokens 会参与 attention 计算
9
+ 4. 使用 NeuralMemory 模块来动态更新这些 memory tokens 的内容
10
+
11
+ 原始序列: [t1, t2, t3, ..., t128, t129, ..., t256, ...]
12
+
13
+ MAC 序列: [M1..M16, t1...t128, M1..M16, t129...t256, ...]
14
+ ↑ ↑
15
+ memory tokens memory tokens
16
+
17
+ === Qwen2 架构 ===
18
+
19
+ Qwen2DecoderLayer:
20
+ ├── input_layernorm (RMSNorm)
21
+ ├── self_attn (Qwen2Attention/Qwen2FlashAttention2)
22
+ │ ├── q_proj, k_proj, v_proj
23
+ │ ├── RoPE (rotary positional embedding)
24
+ │ └── o_proj
25
+ ├── post_attention_layernorm (RMSNorm)
26
+ └── mlp (Qwen2MLP: gate_proj, up_proj, down_proj with SiLU)
27
+
28
+ 我们需要在特定层添加 NeuralMemory 模块
29
+ """
30
+
31
+ import torch
32
+ import torch.nn as nn
33
+ import torch.nn.functional as F
34
+ from torch import Tensor
35
+ from typing import Optional, Tuple, List, Dict, Any
36
+ from copy import deepcopy
37
+ from functools import partial
38
+
39
+ from einops import rearrange, repeat, pack, unpack
40
+
41
+ # Titans 组件
42
+ from titans_pytorch import NeuralMemory, MemoryMLP
43
+ from titans_pytorch.neural_memory import NeuralMemState
44
+
45
+
46
+ # ============================================================================
47
+ # 辅助函数
48
+ # ============================================================================
49
+
50
def exists(v):
    """Return True when *v* is not None (einops-style existence check)."""
    if v is None:
        return False
    return True
52
+
53
def default(v, d):
    """Return *v* when it is not None, otherwise the fallback *d*."""
    if v is None:
        return d
    return v
55
+
56
def divisible_by(num, den):
    """True when *num* divides evenly by *den* (no remainder)."""
    remainder = num % den
    return remainder == 0
58
+
59
def round_up_multiple(seq, mult):
    """Round *seq* up to the nearest multiple of *mult*."""
    remainder = seq % mult
    if remainder == 0:
        return seq
    return seq + (mult - remainder)
61
+
62
+
63
+ # ============================================================================
64
+ # MAC 风格的 Qwen 实现
65
+ # ============================================================================
66
+
67
class QwenMACTransformer(nn.Module):
    """
    Full implementation applying the MAC (Memory-as-Context) structure to a Qwen model.

    Architecture sketch:

        Input IDs
            |
            v
        +-----------------+
        |  Token Embed    |
        +--------+--------+
                 |
                 v
        +-----------------------------------------+
        | Insert memory tokens at each segment's  |
        | head: [M1..Mn, t1..t_seg, M1..Mn, ...]  |
        +--------+--------------------------------+
                 |
                 v
        [ Qwen Decoder Layer 1 ]
            RMSNorm -> Self-Attention -> Add
            RMSNorm -> MLP -> Add
                 |
                 v
        [ Qwen Decoder Layer 2 (with Memory) ]
            RMSNorm -> Self-Attention -> Add
            * NeuralMemory augmentation *
              retrieved = mem(hidden_states)
              hidden += gate * retrieved
            RMSNorm -> MLP -> Add
                 |
               (more layers)
                 |
                 v
        Final RMSNorm -> LM Head -> Logits
    """

    def __init__(
        self,
        qwen_model,
        # === Segment configuration ===
        segment_len: int = 128,                     # length of each segment
        num_longterm_mem_tokens: int = 16,          # memory tokens at the head of each segment
        num_persist_mem_tokens: int = 4,            # globally-shared persistent memory tokens

        # === NeuralMemory configuration ===
        neural_memory_layers: Tuple[int, ...] = (2, 4, 6),  # which layers receive memory
        memory_chunk_size: int = 64,                # chunk size of the memory module
        memory_batch_size: int = 128,               # batch size for memory updates
        memory_depth: int = 2,                      # depth of the memory MLP

        # === Other configuration ===
        dim_head: int = 64,
        num_heads: int = None,                      # defaults to hidden_size // dim_head
        use_momentum: bool = True,
        gate_memory_output: bool = False,           # gate attention output with memory instead of adding
    ):
        super().__init__()

        # Keep the original (wrapped) Qwen model.
        self.qwen = qwen_model
        self.config = qwen_model.config

        # Model dimensions, read from the wrapped model's config.
        self.hidden_size = self.config.hidden_size
        self.num_layers = self.config.num_hidden_layers
        num_heads = default(num_heads, self.hidden_size // dim_head)

        # Segment configuration.
        self.segment_len = segment_len
        self.num_longterm_mem_tokens = num_longterm_mem_tokens
        self.num_persist_mem_tokens = num_persist_mem_tokens
        self.total_segment_len = segment_len + num_longterm_mem_tokens

        # =====================================================================
        # Memory tokens (the heart of MAC!)
        # =====================================================================

        # Persistent memory tokens - prepended to the full sequence, shared by
        # all segments; intended to carry global context.
        self.persist_mem_tokens = nn.Parameter(
            torch.randn(num_persist_mem_tokens, self.hidden_size) * 0.02
        )

        # Long-term memory tokens - inserted at the head of every segment.
        # These tokens are dynamically refreshed via NeuralMemory.
        self.longterm_mem_tokens = nn.Parameter(
            torch.randn(num_longterm_mem_tokens, self.hidden_size) * 0.02
        )

        # =====================================================================
        # NeuralMemory modules
        # =====================================================================

        self.neural_memory_layers = neural_memory_layers
        self.gate_memory_output = gate_memory_output

        # Per-layer containers, keyed by str(layer_idx).
        self.neural_memories = nn.ModuleDict()
        self.memory_projections = nn.ModuleDict()  # NOTE(review): declared but never populated/used below
        self.memory_gates = nn.ModuleDict()        # gating layers

        # Template memory network; deep-copied per layer so weights are not shared.
        memory_model_template = MemoryMLP(
            dim=dim_head,
            depth=memory_depth,
            expansion_factor=2.0
        )

        for layer_idx in neural_memory_layers:
            layer_key = str(layer_idx)

            # NeuralMemory module for this layer.
            self.neural_memories[layer_key] = NeuralMemory(
                dim=self.hidden_size,
                chunk_size=memory_chunk_size,
                batch_size=memory_batch_size,
                dim_head=dim_head,
                heads=num_heads,
                model=deepcopy(memory_model_template),
                momentum=use_momentum,
                momentum_order=1,
                qk_rmsnorm=True,
                pre_rmsnorm=True,
                default_step_transform_max_lr=0.1,
            )

            # Gating layer - controls how strongly retrieved memory affects the stream.
            self.memory_gates[layer_key] = nn.Sequential(
                nn.Linear(self.hidden_size, self.hidden_size),
                nn.Sigmoid()
            )

        print(f"[QwenMAC] 初始化完成:")
        print(f" - 隐藏层大小: {self.hidden_size}")
        print(f" - 层数: {self.num_layers}")
        print(f" - Segment 长度: {segment_len}")
        print(f" - Longterm Memory Tokens: {num_longterm_mem_tokens}")
        print(f" - Persist Memory Tokens: {num_persist_mem_tokens}")
        print(f" - 记忆层: {neural_memory_layers}")

    def _insert_memory_tokens(
        self,
        hidden_states: Tensor,  # [batch, seq_len, hidden]
        batch_size: int,
        seq_len: int,
    ) -> Tuple[Tensor, int]:
        """
        Insert memory tokens into the sequence.

        Input:  [batch, seq_len, hidden]
        Output: [batch, new_seq_len, hidden]

        Processing:
        original: [t1, t2, ..., t128, t129, ..., t256]

        1. split into segments:
           Seg1: [t1, ..., t128]
           Seg2: [t129, ..., t256]

        2. prepend longterm_mem to every segment:
           Seg1: [M1, ..., M16, t1, ..., t128]
           Seg2: [M1, ..., M16, t129, ..., t256]

        3. concatenate + prepend persist_mem:
           [P1, ..., P4, M1..M16, t1..t128, M1..M16, t129..t256]
        """
        segment_len = self.segment_len
        num_longterm = self.num_longterm_mem_tokens
        num_persist = self.num_persist_mem_tokens

        # Number of segments needed (ceiling division).
        num_segments = (seq_len + segment_len - 1) // segment_len

        # Zero-pad the tail so the length is a multiple of segment_len.
        padded_len = num_segments * segment_len
        if seq_len < padded_len:
            padding = padded_len - seq_len
            hidden_states = F.pad(hidden_states, (0, 0, 0, padding))

        # Reshape into segments: [batch, num_segments, segment_len, hidden]
        hidden_states = rearrange(
            hidden_states,
            'b (s n) d -> b s n d',
            s=num_segments,
            n=segment_len
        )

        # Broadcast longterm memory tokens: [batch, num_segments, num_longterm, hidden]
        longterm_mem = repeat(
            self.longterm_mem_tokens,
            'n d -> b s n d',
            b=batch_size,
            s=num_segments
        )

        # Prepend memory tokens to every segment:
        # [batch, num_segments, num_longterm + segment_len, hidden]
        hidden_states = torch.cat([longterm_mem, hidden_states], dim=2)

        # Flatten segments: [batch, num_segments * (num_longterm + segment_len), hidden]
        hidden_states = rearrange(hidden_states, 'b s n d -> b (s n) d')

        # Prepend the persistent memory tokens at the very front.
        persist_mem = repeat(
            self.persist_mem_tokens,
            'n d -> b n d',
            b=batch_size
        )
        hidden_states = torch.cat([persist_mem, hidden_states], dim=1)

        new_seq_len = hidden_states.shape[1]

        return hidden_states, new_seq_len

    def _remove_memory_tokens(
        self,
        hidden_states: Tensor,
        original_seq_len: int,
    ) -> Tensor:
        """
        Strip the memory tokens from the output, restoring the original sequence length.
        Inverse of `_insert_memory_tokens` (also drops the tail padding).
        """
        segment_len = self.segment_len
        num_longterm = self.num_longterm_mem_tokens
        num_persist = self.num_persist_mem_tokens
        total_segment_len = segment_len + num_longterm

        batch_size = hidden_states.shape[0]  # NOTE(review): unused

        # Drop the persistent tokens at the front.
        hidden_states = hidden_states[:, num_persist:]

        # Recompute the segment count the same way insertion did.
        num_segments = (original_seq_len + segment_len - 1) // segment_len

        # Reshape back into segments.
        hidden_states = rearrange(
            hidden_states,
            'b (s n) d -> b s n d',
            s=num_segments,
            n=total_segment_len
        )

        # Drop the memory tokens at each segment's head.
        hidden_states = hidden_states[:, :, num_longterm:, :]

        # Flatten and truncate to the original length (removes tail padding).
        hidden_states = rearrange(hidden_states, 'b s n d -> b (s n) d')
        hidden_states = hidden_states[:, :original_seq_len, :]

        return hidden_states

    def _create_mac_attention_mask(
        self,
        seq_len_with_mem: int,
        device: torch.device,
        dtype: torch.dtype,
    ) -> Tensor:
        """
        Build a MAC-style attention mask.

        Properties of the MAC mask:
        1. persistent memory tokens are visible from every position
        2. each segment is causal internally
        3. memory tokens may attend to earlier segments

        This is a simplified version; a complete one needs more detail.
        NOTE(review): `forward` currently passes None instead of calling this helper.
        """
        # Base causal (lower-triangular) mask.
        mask = torch.ones(seq_len_with_mem, seq_len_with_mem, device=device, dtype=dtype)
        mask = torch.tril(mask)

        # Persistent memory columns are visible to all positions.
        num_persist = self.num_persist_mem_tokens
        mask[:, :num_persist] = 1.0

        return mask

    def forward(
        self,
        input_ids: Tensor,
        attention_mask: Optional[Tensor] = None,
        position_ids: Optional[Tensor] = None,
        memory_states: Optional[Dict[str, NeuralMemState]] = None,
        return_memory_states: bool = True,
        **kwargs
    ) -> Dict[str, Any]:
        """
        Forward pass.

        Args:
            input_ids: [batch, seq_len]
            attention_mask: [batch, seq_len] (currently ignored - see Step 3)
            memory_states: per-layer memory states (for incremental inference)

        Returns:
            dict with 'logits', 'hidden_states', and (optionally) 'memory_states'
        """
        batch_size, seq_len = input_ids.shape
        device = input_ids.device

        # =====================================================================
        # Step 1: token embedding
        # =====================================================================
        if hasattr(self.qwen.model, 'embed_tokens'):
            hidden_states = self.qwen.model.embed_tokens(input_ids)
        else:
            hidden_states = self.qwen.get_input_embeddings()(input_ids)

        # =====================================================================
        # Step 2: insert memory tokens
        # =====================================================================
        hidden_states, seq_len_with_mem = self._insert_memory_tokens(
            hidden_states, batch_size, seq_len
        )

        # =====================================================================
        # Step 3: attention mask (simplified)
        # =====================================================================
        # NOTE: a complete implementation needs more careful mask handling.
        # None here falls back to the attention module's default causal mask.
        mac_attention_mask = None

        # =====================================================================
        # Step 4: process layer by layer
        # =====================================================================
        if memory_states is None:
            memory_states = {}
        next_memory_states = {}

        # Iterate over all Qwen decoder layers, re-implementing their forward
        # so memory can be injected between attention and MLP.
        for layer_idx, layer in enumerate(self.qwen.model.layers):
            layer_key = str(layer_idx)

            # -----------------------------------------------------------------
            # 4.1 standard Qwen decoder-layer forward (simplified)
            # -----------------------------------------------------------------
            residual = hidden_states

            # Input LayerNorm
            hidden_states = layer.input_layernorm(hidden_states)

            # Self attention.
            # NOTE(review): this simplified call may not match every transformers
            # version's Qwen2Attention signature (e.g. position_embeddings) - confirm.
            attn_output = layer.self_attn(
                hidden_states=hidden_states,
                attention_mask=mac_attention_mask,
                position_ids=None,  # presumably generated internally - TODO confirm
            )

            # Attention modules may return a tuple (output, ...).
            if isinstance(attn_output, tuple):
                attn_output = attn_output[0]

            hidden_states = residual + attn_output

            # -----------------------------------------------------------------
            # 4.2 NeuralMemory augmentation (selected layers only)
            # -----------------------------------------------------------------
            if layer_key in self.neural_memories:
                neural_mem = self.neural_memories[layer_key]
                gate_fn = self.memory_gates[layer_key]

                # Memory state carried over for this layer (None on first call).
                mem_state = memory_states.get(layer_key)

                # Memory retrieval and update in one call.
                retrieved, next_mem_state = neural_mem(
                    hidden_states,
                    state=mem_state
                )

                # Gated fusion.
                gate = gate_fn(hidden_states)

                if self.gate_memory_output:
                    # Option 1: modulate the stream with the retrieved memory.
                    hidden_states = hidden_states * (1 + gate * retrieved.sigmoid())
                else:
                    # Option 2: add the gated memory directly (more common).
                    hidden_states = hidden_states + gate * retrieved

                # Persist the updated memory state.
                next_memory_states[layer_key] = next_mem_state

            # -----------------------------------------------------------------
            # 4.3 feed-forward network
            # -----------------------------------------------------------------
            residual = hidden_states
            hidden_states = layer.post_attention_layernorm(hidden_states)
            hidden_states = layer.mlp(hidden_states)
            hidden_states = residual + hidden_states

        # =====================================================================
        # Step 5: final LayerNorm
        # =====================================================================
        hidden_states = self.qwen.model.norm(hidden_states)

        # =====================================================================
        # Step 6: remove memory tokens
        # =====================================================================
        hidden_states = self._remove_memory_tokens(hidden_states, seq_len)

        # =====================================================================
        # Step 7: LM head
        # =====================================================================
        logits = self.qwen.lm_head(hidden_states)

        # =====================================================================
        # Assemble result
        # =====================================================================
        result = {
            'logits': logits,
            'hidden_states': hidden_states,
        }

        if return_memory_states:
            result['memory_states'] = next_memory_states

        return result

    def generate(
        self,
        input_ids: Tensor,
        max_new_tokens: int = 100,
        temperature: float = 1.0,
        top_p: float = 0.9,
        memory_states: Optional[Dict] = None,
        **kwargs
    ) -> Tensor:
        """
        Simple greedy/sampling generation loop.
        NOTE(review): `top_p` is accepted but never applied below.
        """
        generated = input_ids.clone()

        for _ in range(max_new_tokens):
            # Full forward over the whole prefix each step (no KV cache).
            outputs = self.forward(
                generated,
                memory_states=memory_states,
                return_memory_states=True
            )

            logits = outputs['logits'][:, -1, :]
            memory_states = outputs['memory_states']

            # Sampling (temperature > 0) or greedy argmax.
            if temperature > 0:
                probs = F.softmax(logits / temperature, dim=-1)
                next_token = torch.multinomial(probs, num_samples=1)
            else:
                next_token = logits.argmax(dim=-1, keepdim=True)

            generated = torch.cat([generated, next_token], dim=-1)

            # Stop when every sequence in the batch emitted EOS.
            if hasattr(self.config, 'eos_token_id'):
                if (next_token == self.config.eos_token_id).all():
                    break

        return generated
556
+
557
+
558
+ # ============================================================================
559
+ # 使用示例
560
+ # ============================================================================
561
+
562
def main():
    """
    Complete usage example: wrap a HF Qwen model with MAC, then smoke-test
    the standalone NeuralMemory component.
    """
    print("=" * 70)
    print("MAC (Memory-as-Context) 集成到 Qwen 的示例")
    print("=" * 70)

    # -------------------------------------------------------------------------
    # Option 1: use a Hugging Face Qwen model
    # -------------------------------------------------------------------------

    try:
        from transformers import AutoModelForCausalLM, AutoTokenizer

        print("\n[1] 加载 Qwen 模型...")
        model_name = "Qwen/Qwen2-0.5B"

        tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            trust_remote_code=True
        )

        # fp16 + device_map only when CUDA is available.
        qwen_model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            device_map="auto" if torch.cuda.is_available() else None,
            trust_remote_code=True
        )

        print(f" 模型配置:")
        print(f" - hidden_size: {qwen_model.config.hidden_size}")
        print(f" - num_layers: {qwen_model.config.num_hidden_layers}")
        print(f" - num_heads: {qwen_model.config.num_attention_heads}")

        # Build the MAC-wrapped version.
        print("\n[2] 创建 QwenMAC 模型...")
        mac_model = QwenMACTransformer(
            qwen_model=qwen_model,
            segment_len=64,                  # one segment per 64 tokens
            num_longterm_mem_tokens=8,       # 8 memory tokens per segment
            num_persist_mem_tokens=4,        # 4 global memory tokens
            neural_memory_layers=(1, 3, 5),  # add memory at layers 1, 3, 5
            memory_chunk_size=32,
            memory_batch_size=64,
        )

        if torch.cuda.is_available():
            mac_model = mac_model.cuda()

        # Forward-pass smoke test.
        print("\n[3] 测试前向传播...")
        test_text = "人工智能正在改变世界,它可以"
        inputs = tokenizer(test_text, return_tensors="pt")

        device = next(mac_model.parameters()).device
        input_ids = inputs.input_ids.to(device)

        with torch.no_grad():
            outputs = mac_model(input_ids)

        print(f" 输入形状: {input_ids.shape}")
        print(f" 输出 logits 形状: {outputs['logits'].shape}")
        print(f" 记忆状态数量: {len(outputs['memory_states'])}")

        # Generation smoke test.
        print("\n[4] 测试文本生成...")
        with torch.no_grad():
            generated = mac_model.generate(
                input_ids,
                max_new_tokens=50,
                temperature=0.7
            )

        generated_text = tokenizer.decode(generated[0], skip_special_tokens=True)
        print(f" 生成文本: {generated_text}")

    except ImportError as e:
        # transformers is an optional dependency for this demo.
        print(f"\n注意: 需要安装 transformers")
        print(f"pip install transformers")
        print(f"错误: {e}")

    # -------------------------------------------------------------------------
    # Option 2: standalone test of the NeuralMemory component
    # -------------------------------------------------------------------------

    print("\n" + "=" * 70)
    print("[独立测试] NeuralMemory 组件")
    print("=" * 70)

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Build a NeuralMemory in isolation.
    mem = NeuralMemory(
        dim=512,          # hidden dimension
        chunk_size=32,    # chunk size
        batch_size=64,    # batch size
        dim_head=64,      # dimension per head
        heads=8,          # number of heads
        model=MemoryMLP(dim=64, depth=2),
        momentum=True,
        qk_rmsnorm=True,
    ).to(device)

    # Synthetic input.
    batch_size = 2
    seq_len = 256
    hidden_dim = 512

    x = torch.randn(batch_size, seq_len, hidden_dim).to(device)

    print(f"\n输入形状: {x.shape}")

    # First forward pass (fresh state).
    retrieved, state = mem(x)
    print(f"检索输出形状: {retrieved.shape}")
    print(f"记忆状态 seq_index: {state.seq_index}")

    # Second forward pass, carrying the previous state forward.
    x2 = torch.randn(batch_size, seq_len, hidden_dim).to(device)
    retrieved2, state2 = mem(x2, state=state)
    print(f"第二次检索输出形状: {retrieved2.shape}")
    print(f"更新后 seq_index: {state2.seq_index}")

    print("\n" + "=" * 70)
    print("完成!")
    print("=" * 70)
689
+
690
+
691
# Script entry point: run the demo only when executed directly.
if __name__ == "__main__":
    main()
examples/qwen_titans_streaming.py ADDED
@@ -0,0 +1,600 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Qwen + Titans 流式处理超长序列
3
+
4
+ 核心思想:
5
+ - Qwen 作为 Core(短期处理器),每次只处理一个 chunk
6
+ - Titans NeuralMemory 作为长期记忆,跨 chunk 保持状态
7
+ - 虽然 Core 窗口有限(如 4k/8k),但整体能处理任意长度的上下文
8
+
9
+ 处理流程:
10
+ ┌─────────────────────────────────────────────────────────────────────┐
11
+ │ 超长文档 (1M tokens) │
12
+ │ [chunk_0] [chunk_1] [chunk_2] ... [chunk_n-1] [chunk_n + question] │
13
+ └─────────────────────────────────────────────────────────────────────┘
14
+ │ │ │ │ │
15
+ ▼ ▼ ▼ ▼ ▼
16
+ ┌─────┐ ┌─────┐ ┌─────┐ ┌─────┐ ┌─────┐
17
+ │Qwen │ │Qwen │ │Qwen │ ... │Qwen │ │Qwen │
18
+ │Core │ │Core │ │Core │ │Core │ │Core │
19
+ └──┬──┘ └──┬──┘ └──┬──┘ └──┬──┘ └──┬──┘
20
+ │ │ │ │ │
21
+ ▼ ▼ ▼ ▼ ▼
22
+ ┌─────────────────────────────────────────────────────────┐
23
+ │ Titans Long-term Memory │
24
+ │ M_0 ──write──> M_1 ──write──> M_2 ... M_n-1 ──read──> │
25
+ │ │
26
+ │ 存储:关键事实、实体关系、重要信息 │
27
+ │ 检索:回答问题时取回相关记忆 │
28
+ └─────────────────────────────────────────────────────────┘
29
+ """
30
+
31
+ import torch
32
+ import torch.nn as nn
33
+ import torch.nn.functional as F
34
+ from torch import Tensor
35
+ from typing import Optional, List, Dict, Any, Tuple
36
+ from dataclasses import dataclass
37
+ from tqdm import tqdm
38
+ import math
39
+
40
+ from einops import rearrange, repeat
41
+
42
+ # Titans 组件
43
+ from titans_pytorch import NeuralMemory, MemoryMLP
44
+ from titans_pytorch.neural_memory import NeuralMemState
45
+
46
+
47
@dataclass
class StreamingConfig:
    """Configuration for streaming long-sequence processing."""
    chunk_size: int = 4096          # tokens per chunk fed to the Qwen core
    memory_chunk_size: int = 64     # internal chunk size of NeuralMemory
    memory_batch_size: int = 128    # batch size of NeuralMemory updates
    num_memory_tokens: int = 16     # number of memory tokens injected each step
    overlap_size: int = 128         # overlap between chunks (optional; helps context continuity)
55
+
56
+
57
class TitansLongTermMemory(nn.Module):
    """
    Titans long-term memory module.

    Responsibilities:
    1. write the information of each chunk into long-term memory
    2. retrieve relevant information from long-term memory
    3. produce memory tokens to inject into the Core
    """

    def __init__(
        self,
        hidden_size: int,
        chunk_size: int = 64,
        batch_size: int = 128,
        dim_head: int = 64,
        num_heads: int = None,
        memory_depth: int = 2,
    ):
        super().__init__()

        self.hidden_size = hidden_size
        # Default: as many heads as fit the hidden size at dim_head each.
        num_heads = num_heads or (hidden_size // dim_head)

        # Inner memory network.
        memory_model = MemoryMLP(
            dim=dim_head,
            depth=memory_depth,
            expansion_factor=2.0
        )

        # NeuralMemory - the core of the long-term memory.
        self.neural_memory = NeuralMemory(
            dim=hidden_size,
            chunk_size=chunk_size,
            batch_size=batch_size,
            dim_head=dim_head,
            heads=num_heads,
            model=memory_model,
            momentum=True,
            momentum_order=1,
            qk_rmsnorm=True,
            pre_rmsnorm=True,
            default_step_transform_max_lr=0.1,
        )

        # Learnable query tokens used to retrieve from the long-term memory.
        # Fixed capacity of 16 queries; `read` slices the first num_tokens.
        self.memory_query_tokens = nn.Parameter(
            torch.randn(1, 16, hidden_size) * 0.02
        )

        # Projection: adapts retrieved vectors into a form suitable for
        # injection into the Core.
        self.memory_proj = nn.Sequential(
            nn.LayerNorm(hidden_size),
            nn.Linear(hidden_size, hidden_size),
            nn.GELU(),
            nn.Linear(hidden_size, hidden_size),
        )

    def write(
        self,
        hidden_states: Tensor,  # [batch, seq_len, hidden]
        state: Optional[NeuralMemState] = None,
    ) -> Tuple[Tensor, NeuralMemState]:
        """
        Write the current chunk's information into long-term memory.

        Args:
            hidden_states: hidden states of the current chunk after the Qwen core
            state: previous memory state (None on the first chunk)

        Returns:
            retrieved: information retrieved from memory
            next_state: updated memory state
        """
        # NeuralMemory performs store (write) and retrieve (read) in one call.
        retrieved, next_state = self.neural_memory(
            hidden_states,
            state=state
        )

        return retrieved, next_state

    def read(
        self,
        batch_size: int,
        state: NeuralMemState,
        num_tokens: int = 16,
    ) -> Tensor:
        """
        Read from long-term memory, producing memory tokens.

        These tokens are meant to be injected into the Core's input.
        """
        # Broadcast the (sliced) learnable query tokens over the batch.
        queries = repeat(
            self.memory_query_tokens[:, :num_tokens],
            '1 n d -> b n d',
            b=batch_size
        )

        # Retrieve from memory using the query tokens; the returned state
        # is intentionally discarded (read-only access).
        retrieved, _ = self.neural_memory(
            queries,
            state=state
        )

        # Project into injection-ready memory tokens.
        memory_tokens = self.memory_proj(retrieved)

        return memory_tokens
169
+
170
+
171
class QwenTitansStreaming(nn.Module):
    """
    Qwen + Titans streaming model.

    Handles sequences of arbitrary length by:
    1. splitting the sequence into chunks,
    2. processing each chunk with the Qwen core,
    3. carrying information across chunks through the Titans long-term memory.
    """

    def __init__(
        self,
        qwen_model,
        config: 'StreamingConfig' = None,
    ):
        super().__init__()

        self.qwen = qwen_model
        self.config = config or StreamingConfig()
        self.hidden_size = qwen_model.config.hidden_size

        # Long-term memory module shared across chunks.
        self.long_term_memory = TitansLongTermMemory(
            hidden_size=self.hidden_size,
            chunk_size=self.config.memory_chunk_size,
            batch_size=self.config.memory_batch_size,
        )

        # Gate for fusing retrieved memory into hidden states.
        # NOTE(review): not referenced by the methods below yet — confirm intent.
        self.memory_gate = nn.Sequential(
            nn.Linear(self.hidden_size * 2, self.hidden_size),
            nn.Sigmoid()
        )

        print(f"[QwenTitansStreaming] 初始化完成:")
        print(f" - Chunk size: {self.config.chunk_size}")
        print(f" - Memory chunk size: {self.config.memory_chunk_size}")
        print(f" - Memory batch size: {self.config.memory_batch_size}")
        print(f" - Overlap size: {self.config.overlap_size}")

    def _split_into_chunks(
        self,
        input_ids: Tensor,
        chunk_size: int,
        overlap: int = 0,
    ) -> List[Tensor]:
        """
        Split the input sequence into (possibly overlapping) chunks.

        Args:
            input_ids: [batch, seq_len]
            chunk_size: size of each chunk
            overlap: number of overlapping tokens between consecutive chunks

        Returns:
            List of chunks, each [batch, chunk_size] (tail chunk zero-padded)

        Raises:
            ValueError: if ``overlap >= chunk_size`` — the stride would be
                non-positive, which previously crashed inside ``range`` (step
                of 0) or silently produced no chunks (negative step).
        """
        batch_size, seq_len = input_ids.shape
        chunks = []

        stride = chunk_size - overlap
        # Fix: guard against a non-positive stride (overlap >= chunk_size).
        if stride <= 0:
            raise ValueError(
                f'overlap ({overlap}) must be smaller than chunk_size ({chunk_size})'
            )

        for start in range(0, seq_len, stride):
            end = min(start + chunk_size, seq_len)
            chunk = input_ids[:, start:end]

            # Zero-pad the final chunk up to a full chunk_size.
            # NOTE(review): pads with id 0, which may be a real token — confirm
            # against the tokenizer's pad id.
            if chunk.shape[1] < chunk_size:
                pad_len = chunk_size - chunk.shape[1]
                chunk = F.pad(chunk, (0, pad_len), value=0)

            chunks.append(chunk)

            if end >= seq_len:
                break

        return chunks

    def process_document(
        self,
        input_ids: Tensor,
        attention_mask: Optional[Tensor] = None,
        return_all_hidden_states: bool = False,
        show_progress: bool = True,
    ) -> Dict[str, Any]:
        """
        Stream an entire document through the model.

        Core loop:
        1. split the document into chunks,
        2. process chunks one by one,
        3. update the long-term memory after every chunk.

        Args:
            input_ids: [batch, seq_len] — may be arbitrarily long
            attention_mask: [batch, seq_len]
                NOTE(review): currently unused — chunks run unmasked.
            return_all_hidden_states: also collect per-chunk hidden states
            show_progress: wrap the chunk loop in a tqdm progress bar

        Returns:
            Dict with the last chunk's hidden states, the final memory
            state, and the number of chunks processed.
        """
        batch_size, total_seq_len = input_ids.shape

        # Split into chunks.
        chunks = self._split_into_chunks(
            input_ids,
            self.config.chunk_size,
            self.config.overlap_size
        )

        num_chunks = len(chunks)
        print(f"[process_document] 总长度: {total_seq_len}, 分成 {num_chunks} 个 chunks")

        # Initialise memory state.
        memory_state = None
        all_hidden_states = []

        iterator = tqdm(enumerate(chunks), total=num_chunks, desc="Processing chunks") \
            if show_progress else enumerate(chunks)

        for chunk_idx, chunk_ids in iterator:
            # Step 1: read memory tokens from the long-term memory
            # (skipped for the first chunk, which has no history yet).
            memory_tokens = None
            if memory_state is not None and chunk_idx > 0:
                memory_tokens = self.long_term_memory.read(
                    batch_size=batch_size,
                    state=memory_state,
                    num_tokens=self.config.num_memory_tokens
                )

            # Step 2: run the Qwen core over the current chunk.
            chunk_hidden = self._process_chunk_with_memory(
                chunk_ids,
                memory_tokens=memory_tokens,
            )

            # Step 3: write the chunk's information into the long-term memory.
            _, memory_state = self.long_term_memory.write(
                chunk_hidden,
                state=memory_state
            )

            if return_all_hidden_states:
                all_hidden_states.append(chunk_hidden)

        result = {
            'last_hidden_states': chunk_hidden,
            'memory_state': memory_state,
            'num_chunks_processed': num_chunks,
        }

        if return_all_hidden_states:
            result['all_hidden_states'] = all_hidden_states

        return result

    def _process_chunk_with_memory(
        self,
        chunk_ids: Tensor,
        memory_tokens: Optional[Tensor] = None,
    ) -> Tensor:
        """
        Process a single chunk, optionally prepending memory tokens.

        Args:
            chunk_ids: [batch, chunk_size]
            memory_tokens: [batch, num_mem_tokens, hidden] retrieved from the
                long-term memory, or None

        Returns:
            hidden_states: [batch, chunk_size, hidden]
        """
        # Token embeddings.
        if hasattr(self.qwen.model, 'embed_tokens'):
            hidden_states = self.qwen.model.embed_tokens(chunk_ids)
        else:
            hidden_states = self.qwen.get_input_embeddings()(chunk_ids)

        # Prepend retrieved memory tokens, if any.
        if memory_tokens is not None:
            # [batch, num_mem + chunk_size, hidden]
            hidden_states = torch.cat([memory_tokens, hidden_states], dim=1)

        # Run through all Qwen layers.
        for layer in self.qwen.model.layers:
            layer_output = layer(hidden_states, attention_mask=None)
            if isinstance(layer_output, tuple):
                hidden_states = layer_output[0]
            else:
                hidden_states = layer_output

        # Final norm.
        hidden_states = self.qwen.model.norm(hidden_states)

        # Strip the memory tokens off again.
        if memory_tokens is not None:
            num_mem = memory_tokens.shape[1]
            hidden_states = hidden_states[:, num_mem:]

        return hidden_states

    def generate_answer(
        self,
        question_ids: Tensor,
        memory_state: 'NeuralMemState',
        max_new_tokens: int = 100,
        temperature: float = 0.7,
    ) -> Tensor:
        """
        Generate an answer conditioned on the long-term memory.

        Key point: the core only sees the question, but it can retrieve
        facts from the previously processed (potentially 1M-token)
        document through the long-term memory.

        Args:
            question_ids: [batch, question_len] token ids of the question
            memory_state: memory state after processing the whole document
            max_new_tokens: maximum number of tokens to generate
            temperature: > 0 samples from softmax, otherwise greedy argmax

        Returns:
            generated_ids: [batch, question_len + generated_len]
        """
        batch_size = question_ids.shape[0]
        generated = question_ids.clone()

        # Fix: resolve the EOS id once. The previous hasattr-only check could
        # compare a tensor against None when the config sets eos_token_id=None.
        eos_token_id = getattr(self.qwen.config, 'eos_token_id', None)

        for _ in range(max_new_tokens):
            # Retrieve relevant information from the long-term memory.
            memory_tokens = self.long_term_memory.read(
                batch_size=batch_size,
                state=memory_state,
                num_tokens=self.config.num_memory_tokens
            )

            # Process the running sequence together with the memory tokens.
            hidden = self._process_chunk_with_memory(
                generated,
                memory_tokens=memory_tokens
            )

            # Predict the next token from the last position.
            logits = self.qwen.lm_head(hidden[:, -1:, :])

            if temperature > 0:
                probs = F.softmax(logits.squeeze(1) / temperature, dim=-1)
                next_token = torch.multinomial(probs, num_samples=1)
            else:
                next_token = logits.squeeze(1).argmax(dim=-1, keepdim=True)

            generated = torch.cat([generated, next_token], dim=1)

            # Stop once every sequence in the batch emitted EOS.
            if eos_token_id is not None and (next_token == eos_token_id).all():
                break

        return generated
436
+
437
+
438
+ # ============================================================================
439
+ # BABILong 风格的使用示例
440
+ # ============================================================================
441
+
442
def babilong_style_example():
    """
    Demonstrate Qwen + Titans on a BABILong-style ultra-long-sequence task.

    BABILong task structure:
    - a very long background document first (possibly 1M tokens),
    - a question at the very end,
    - the model must locate the relevant facts in the document to answer.
    """

    print("=" * 70)
    print("Qwen + Titans 流式处理超长序列示例")
    print("=" * 70)

    try:
        from transformers import AutoModelForCausalLM, AutoTokenizer

        # Load the Qwen model.
        print("\n[1] 加载 Qwen 模型...")
        model_name = "Qwen/Qwen2-0.5B"  # or Qwen/Qwen3-4B

        tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        qwen_model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            device_map="auto" if torch.cuda.is_available() else None,
            trust_remote_code=True
        )

        # Build the streaming wrapper.
        print("\n[2] 创建 QwenTitansStreaming 模型...")
        config = StreamingConfig(
            chunk_size=2048,  # process 2k tokens at a time
            memory_chunk_size=64,
            memory_batch_size=128,
            num_memory_tokens=16,
            overlap_size=64,  # 64-token overlap between chunks
        )

        model = QwenTitansStreaming(qwen_model, config)

        if torch.cuda.is_available():
            model = model.cuda()

        # =====================================================================
        # Simulate a BABILong task
        # =====================================================================
        print("\n[3] 模拟 BABILong 任务...")

        # A mock long document (real ones may be hundreds of thousands of tokens).
        long_document = """
        这是一个关于人工智能发展历史的长篇文档。

        第一章:早期发展
        人工智能的概念最早可以追溯到 1950 年代。1956 年的达特茅斯会议
        被认为是人工智能作为一门学科正式诞生的标志。

        [这里假设有很多很多内容...]

        重要事实:达特茅斯会议在 1956 年举行。

        [更多内容...]

        第五十章:现代发展
        2022 年,大型语言模型取得了突破性进展。

        重要事实:GPT-4 在 2023 年发布。

        [更多内容...]
        """

        # Repeat the document to emulate an extremely long sequence.
        # In real use this would be an actual long document.
        very_long_document = long_document * 100  # simulated long document

        question = "\n问题:达特茅斯会议是在哪一年举行的?"
        full_input = very_long_document + question

        # Tokenize.
        print(f" 文档长度(字符): {len(full_input)}")
        inputs = tokenizer(full_input, return_tensors="pt")
        input_ids = inputs.input_ids
        print(f" 文档长度(tokens): {input_ids.shape[1]}")

        device = next(model.parameters()).device
        input_ids = input_ids.to(device)

        # Stream the whole document through the model.
        print("\n[4] 流式处理文档...")
        with torch.no_grad():
            result = model.process_document(
                input_ids,
                show_progress=True
            )

        print(f"\n 处理完成!")
        print(f" - 处理了 {result['num_chunks_processed']} 个 chunks")
        print(f" - 记忆状态 seq_index: {result['memory_state'].seq_index}")

        # Generate an answer (real-usage scenario, kept disabled here).
        # print("\n[5] 基于长期记忆生成答案...")
        # answer = model.generate_answer(
        #     question_ids,
        #     memory_state=result['memory_state'],
        #     max_new_tokens=50
        # )

    except ImportError as e:
        print(f"\n需要安装依赖: pip install transformers")
        print(f"错误: {e}")

    # =========================================================================
    # Standalone test of the Titans long-term memory module
    # =========================================================================
    print("\n" + "=" * 70)
    print("[独立测试] Titans 长期记忆模块")
    print("=" * 70)

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Build the long-term memory module.
    ltm = TitansLongTermMemory(
        hidden_size=512,
        chunk_size=64,
        batch_size=128,
        dim_head=64,
        num_heads=8,
    ).to(device)

    # Simulate processing several chunks.
    batch_size = 2
    chunk_size = 256
    hidden_dim = 512
    num_chunks = 5

    print(f"\n模拟处理 {num_chunks} 个 chunks:")

    memory_state = None
    for i in range(num_chunks):
        # Fake hidden states for the current chunk.
        chunk_hidden = torch.randn(batch_size, chunk_size, hidden_dim).to(device)

        # Write into long-term memory.
        retrieved, memory_state = ltm.write(chunk_hidden, state=memory_state)

        print(f" Chunk {i}: 写入完成, seq_index = {memory_state.seq_index}")

    # Read back from memory.
    print(f"\n从长期记忆中读取:")
    memory_tokens = ltm.read(batch_size, memory_state, num_tokens=16)
    print(f" Memory tokens 形状: {memory_tokens.shape}")

    print("\n" + "=" * 70)
    print("完成!")
    print("=" * 70)


if __name__ == "__main__":
    babilong_style_example()
examples/qwen_with_titans_memory.py ADDED
@@ -0,0 +1,487 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Titans Neural Memory 与 Qwen 模型集成示例
3
+
4
+ 本文件展示了如何将 Titans 的 NeuralMemory 模块集成到 Qwen 模型中,
5
+ 以增强其长期记忆能力。
6
+
7
+ 主要集成方案:
8
+ 1. 作为独立的记忆增强模块(Memory Augmented)
9
+ 2. 替换/增强特定层的注意力机制
10
+ 3. Memory-as-Context 方式(类似 MAC Transformer)
11
+ """
12
+
13
+ import torch
14
+ import torch.nn as nn
15
+ from torch import Tensor
16
+ from typing import Optional, Tuple
17
+ from einops import rearrange, repeat
18
+ from copy import deepcopy
19
+
20
+ # 导入 Titans 的核心组件
21
+ from titans_pytorch import NeuralMemory, MemoryMLP, NeuralMemState
22
+
23
+
24
+ # ============================================================================
25
+ # 方案 1: 简单的记忆增强包装器 (Memory Augmented Wrapper)
26
+ # ============================================================================
27
+
28
class TitansMemoryWrapper(nn.Module):
    """
    Simplest integration: bolt a Titans memory module onto Qwen from the
    outside, without touching Qwen's internals.

    How it works:
    1. a NeuralMemory stores and retrieves long-range information,
    2. the retrieved memory is gated into Qwen's final hidden states.

    Useful when you want to validate the effect of Titans memory quickly
    and do not want to modify the Qwen architecture.
    """

    def __init__(
        self,
        qwen_model,
        hidden_size: int = 896,  # hidden size of Qwen2-0.5B
        chunk_size: int = 64,
        memory_batch_size: int = 128,
        num_heads: int = 4,
        dim_head: int = 64,
        memory_depth: int = 2,
    ):
        super().__init__()
        self.qwen = qwen_model

        # Projections between Qwen's hidden space and the memory space.
        self.mem_dim = num_heads * dim_head
        self.to_mem_input = nn.Linear(hidden_size, self.mem_dim)
        self.from_mem_output = nn.Linear(self.mem_dim, hidden_size)

        # Titans neural memory with an MLP memory model.
        self.neural_memory = NeuralMemory(
            dim=self.mem_dim,
            chunk_size=chunk_size,
            batch_size=memory_batch_size,
            dim_head=dim_head,
            heads=num_heads,
            model=MemoryMLP(
                dim=dim_head,
                depth=memory_depth,
                expansion_factor=2.0,
            ),
            momentum=True,
            momentum_order=1,
            qk_rmsnorm=True,
        )

        # Gate controlling how much retrieved memory gets mixed in.
        self.fusion_gate = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size),
            nn.Sigmoid(),
        )

    def forward(
        self,
        input_ids: Tensor,
        attention_mask: Optional[Tensor] = None,
        memory_state: Optional[NeuralMemState] = None,
        **kwargs
    ):
        """Run Qwen, then enhance its last hidden states with Titans memory."""
        # Backbone pass; keep hidden states so the last layer can be reused.
        qwen_outputs = self.qwen(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_hidden_states=True,
            **kwargs
        )
        last_hidden = qwen_outputs.hidden_states[-1]

        # Store / retrieve through the neural memory in its own dimension.
        retrieved, next_memory_state = self.neural_memory(
            self.to_mem_input(last_hidden),
            state=memory_state
        )
        memory_hidden = self.from_mem_output(retrieved)

        # Gated residual fusion of the retrieved memory.
        gate = self.fusion_gate(torch.cat((last_hidden, memory_hidden), dim=-1))
        fused = last_hidden + gate * memory_hidden

        # Recompute logits from the enhanced states when the LM head is exposed.
        if hasattr(self.qwen, 'lm_head'):
            logits = self.qwen.lm_head(fused)
        else:
            logits = qwen_outputs.logits

        return {
            'logits': logits,
            'hidden_states': fused,
            'memory_state': next_memory_state,
            'qwen_outputs': qwen_outputs
        }
130
+
131
+
132
+ # ============================================================================
133
+ # 方案 2: 将 Titans 记忆嵌入到 Qwen 的特定层中
134
+ # ============================================================================
135
+
136
class QwenDecoderLayerWithMemory(nn.Module):
    """
    A Qwen decoder layer augmented with a Titans memory module.

    Wraps an existing decoder layer and inserts a memory retrieve/update
    step right after the self-attention sub-block.
    """

    def __init__(
        self,
        original_layer,
        hidden_size: int,
        chunk_size: int = 64,
        memory_batch_size: int = 128,
        num_heads: int = 4,
        dim_head: int = 64,
    ):
        super().__init__()

        # Reuse the wrapped layer's sub-modules as-is.
        self.self_attn = original_layer.self_attn
        self.mlp = original_layer.mlp
        self.input_layernorm = original_layer.input_layernorm
        self.post_attention_layernorm = original_layer.post_attention_layernorm

        # Titans memory operating in its own (smaller) dimension.
        self.mem_dim = num_heads * dim_head
        self.to_mem = nn.Linear(hidden_size, self.mem_dim)
        self.from_mem = nn.Linear(self.mem_dim, hidden_size)

        self.neural_memory = NeuralMemory(
            dim=self.mem_dim,
            chunk_size=chunk_size,
            batch_size=memory_batch_size,
            dim_head=dim_head,
            heads=num_heads,
            model=MemoryMLP(dim=dim_head, depth=2),
            momentum=True,
        )

        # Gate on the memory contribution.
        self.mem_gate = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.Sigmoid(),
        )

    def forward(
        self,
        hidden_states: Tensor,
        attention_mask: Optional[Tensor] = None,
        position_ids: Optional[Tensor] = None,
        memory_state: Optional[NeuralMemState] = None,
        **kwargs
    ):
        """Attention -> Titans memory fusion -> FFN; returns (hidden, memory_state)."""
        # --- self-attention sub-block (pre-norm residual) ---
        shortcut = hidden_states
        normed = self.input_layernorm(hidden_states)
        attn_output, attn_weights, _ = self.self_attn(
            hidden_states=normed,
            attention_mask=attention_mask,
            position_ids=position_ids,
            **kwargs
        )
        hidden_states = shortcut + attn_output

        # --- Titans memory retrieve / update ---
        retrieved, next_memory_state = self.neural_memory(
            self.to_mem(hidden_states),
            state=memory_state
        )
        mem_output = self.from_mem(retrieved)
        hidden_states = hidden_states + self.mem_gate(hidden_states) * mem_output

        # --- feed-forward sub-block (pre-norm residual) ---
        shortcut = hidden_states
        hidden_states = shortcut + self.mlp(self.post_attention_layernorm(hidden_states))

        return hidden_states, next_memory_state
223
+
224
+
225
+ # ============================================================================
226
+ # 方案 3: Memory-as-Context 方式(最接近原论文)
227
+ # ============================================================================
228
+
229
class QwenWithMAC(nn.Module):
    """
    Memory-as-Context (MAC) integration of Titans into Qwen.

    Core idea:
    1. split the long sequence into segments,
    2. prepend long-term memory tokens to every segment,
    3. update those memory tokens with NeuralMemory modules at selected layers.

    This is the configuration closest to the MAC setup in the Titans paper.
    """

    def __init__(
        self,
        qwen_model,
        hidden_size: int = 896,
        segment_len: int = 128,
        num_longterm_mem_tokens: int = 16,
        num_persist_mem_tokens: int = 4,
        memory_layers: Tuple[int, ...] = (2, 4, 6),
        chunk_size: int = 64,
        memory_batch_size: int = 128,
    ):
        super().__init__()

        self.qwen = qwen_model
        self.hidden_size = hidden_size
        self.segment_len = segment_len
        self.num_longterm_mem_tokens = num_longterm_mem_tokens

        # Persistent memory tokens, shared globally across the sequence.
        self.persist_mem = nn.Parameter(
            torch.randn(num_persist_mem_tokens, hidden_size) * 0.02
        )

        # Long-term memory tokens, one copy prepended per segment.
        self.longterm_mem = nn.Parameter(
            torch.randn(num_longterm_mem_tokens, hidden_size) * 0.02
        )

        # One NeuralMemory module per selected layer index.
        self.memory_layers = memory_layers
        self.neural_memories = nn.ModuleDict()

        memory_model = MemoryMLP(dim=64, depth=2)

        for layer_idx in memory_layers:
            self.neural_memories[str(layer_idx)] = NeuralMemory(
                dim=hidden_size,
                chunk_size=chunk_size,
                batch_size=memory_batch_size,
                dim_head=64,
                heads=hidden_size // 64,
                model=deepcopy(memory_model),
                momentum=True,
                qk_rmsnorm=True,
            )

    def prepare_inputs_with_memory(
        self,
        hidden_states: Tensor,
        batch_size: int,
    ) -> Tensor:
        """
        Insert long-term memory tokens at the head of every segment and
        persistent memory tokens at the very front of the sequence.
        """
        seq_len = hidden_states.shape[1]
        num_segments = (seq_len + self.segment_len - 1) // self.segment_len

        # One copy of the long-term memory tokens per (batch, segment).
        longterm = repeat(
            self.longterm_mem,
            'n d -> b s n d',
            b=batch_size,
            s=num_segments
        )

        # Right-pad so the sequence divides evenly into segments.
        padded_len = num_segments * self.segment_len
        if seq_len < padded_len:
            hidden_states = nn.functional.pad(
                hidden_states,
                (0, 0, 0, padded_len - seq_len)
            )

        # (b, s*n, d) -> (b, s, n, d), prepend memory tokens, flatten back.
        hidden_states = rearrange(
            hidden_states,
            'b (s n) d -> b s n d',
            n=self.segment_len
        )
        hidden_states = torch.cat([longterm, hidden_states], dim=2)
        hidden_states = rearrange(hidden_states, 'b s n d -> b (s n) d')

        # Persistent memory tokens go at the very front.
        persist = repeat(self.persist_mem, 'n d -> b n d', b=batch_size)
        return torch.cat([persist, hidden_states], dim=1)

    def forward(
        self,
        input_ids: Tensor,
        attention_mask: Optional[Tensor] = None,
        memory_states: Optional[dict] = None,
        **kwargs
    ):
        """Run Qwen layer by layer, applying Titans memory at selected layers."""
        batch_size = input_ids.shape[0]

        # Token embeddings.
        if hasattr(self.qwen.model, 'embed_tokens'):
            hidden_states = self.qwen.model.embed_tokens(input_ids)
        else:
            hidden_states = self.qwen.get_input_embeddings()(input_ids)

        # Interleave memory tokens with the sequence.
        hidden_states = self.prepare_inputs_with_memory(hidden_states, batch_size)

        if memory_states is None:
            memory_states = {}

        next_memory_states = {}

        for layer_idx, layer in enumerate(self.qwen.model.layers):
            # Plain transformer layer.
            # NOTE(review): the attention mask would need to be extended to
            # cover the inserted memory tokens; it is dropped here.
            hidden_states = layer(
                hidden_states,
                attention_mask=None,
                **kwargs
            )[0]

            # Apply Titans memory at the selected layers.
            key = str(layer_idx)
            if key in self.neural_memories:
                retrieved, next_state = self.neural_memories[key](
                    hidden_states,
                    state=memory_states.get(key)
                )

                # Fixed 0.1 mixing weight (could be made learnable).
                hidden_states = hidden_states + retrieved * 0.1
                next_memory_states[key] = next_state

        # Final layer norm and LM head.
        hidden_states = self.qwen.model.norm(hidden_states)
        logits = self.qwen.lm_head(hidden_states)

        return {
            'logits': logits,
            'hidden_states': hidden_states,
            'memory_states': next_memory_states
        }
391
+
392
+
393
+ # ============================================================================
394
+ # 使用示例
395
+ # ============================================================================
396
+
397
def example_usage():
    """Show how to use the integration schemes defined above."""

    print("=" * 60)
    print("Titans Neural Memory 与 Qwen 集成示例")
    print("=" * 60)

    # NOTE: requires transformers and the Qwen dependencies:
    # pip install transformers torch titans-pytorch

    try:
        from transformers import AutoModelForCausalLM, AutoTokenizer

        # Load a Qwen model (Qwen2-0.5B as an example).
        model_name = "Qwen/Qwen2-0.5B"

        print(f"\n加载模型: {model_name}")
        tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        qwen_model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16,
            device_map="auto",
            trust_remote_code=True
        )

        # Hidden size of the backbone.
        hidden_size = qwen_model.config.hidden_size
        print(f"模型隐藏层大小: {hidden_size}")

        # Scheme 1: simple wrapper.
        print("\n--- 方案 1: TitansMemoryWrapper ---")
        wrapped_model = TitansMemoryWrapper(
            qwen_model=qwen_model,
            hidden_size=hidden_size,
            chunk_size=64,
            memory_batch_size=128,
        )

        # Test input.
        text = "人工智能的发展历程"
        inputs = tokenizer(text, return_tensors="pt")

        with torch.no_grad():
            outputs = wrapped_model(
                input_ids=inputs.input_ids.to(qwen_model.device),
            )
        print(f"输出 logits 形状: {outputs['logits'].shape}")
        print(f"记忆状态: {type(outputs['memory_state'])}")

    except ImportError as e:
        print(f"\n注意: 需要安装相关依赖")
        print(f"pip install transformers torch titans-pytorch")
        print(f"错误: {e}")

    # Standalone smoke test of NeuralMemory.
    print("\n--- 独立测试 NeuralMemory ---")

    mem = NeuralMemory(
        dim=384,
        chunk_size=64,
        batch_size=128,
        dim_head=64,
        heads=4,
        model=MemoryMLP(dim=64, depth=2),
        momentum=True,
    ).cuda() if torch.cuda.is_available() else NeuralMemory(
        dim=384,
        chunk_size=64,
        batch_size=128,
        dim_head=64,
        heads=4,
        model=MemoryMLP(dim=64, depth=2),
        momentum=True,
    )

    # Random input.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    seq = torch.randn(2, 256, 384).to(device)

    retrieved, mem_state = mem(seq)
    print(f"输入形状: {seq.shape}")
    print(f"检索输出形状: {retrieved.shape}")
    print(f"记忆状态序列索引: {mem_state.seq_index}")

    print("\n" + "=" * 60)
    print("集成完成!")
    print("=" * 60)


if __name__ == "__main__":
    example_usage()
examples/run_training.sh ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash

# Training script: Qwen3-4B + Titans on BABILong QA1 (32k).

# Environment variables.
# If CUDA_VISIBLE_DEVICES was already exported outside, do not override it;
# otherwise default to all 8 GPUs.
if [ -z "${CUDA_VISIBLE_DEVICES}" ]; then
    export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
fi
echo "CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES}"
export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True,max_split_size_mb:512

# Enter the repository directory.
cd /root/githubs/titans-pytorch

# Create the output directory.
mkdir -p outputs

# Optional: install dependencies on the first run (mutates the current Python
# environment and may cause dependency conflicts; prefer installing manually
# in an isolated environment).
# echo "安装依赖..."
# pip install -e .

# Launch training: derive the process count from the visible GPU list.
echo "开始训练..."
IFS=',' read -ra _GPU_ARR <<< "${CUDA_VISIBLE_DEVICES}"
NPROC=${#_GPU_ARR[@]}
echo "torchrun nproc_per_node=${NPROC}"

EXTRA_ARGS=""
# Default to FSDP (more memory-efficient; DDP at 32k context with a 4k chunk
# tends to approach 80GB per GPU).
if [ -z "${USE_FSDP}" ]; then
    export USE_FSDP=1
fi

if [ "${USE_FSDP}" = "1" ]; then
    EXTRA_ARGS="--fsdp"
    echo "启用 FSDP"
else
    echo "使用 DDP"
fi

torchrun --standalone --nproc_per_node=${NPROC} examples/train_qwen_titans_babilong.py ${EXTRA_ARGS} \
    2>&1 | tee outputs/training_$(date +%Y%m%d_%H%M%S).log

echo "训练完成!"
examples/train_qwen_baseline_babilong_v4.py ADDED
@@ -0,0 +1,1361 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Qwen3 baseline (NO Titans) - BABILong QA1 (32k) with Cross-Chunk Gradients
3
+
4
+ Control-group purpose:
5
+ - Remove Titans memory modules entirely
6
+ - Train ONLY Qwen's `embed_tokens` and `lm_head`
7
+ - Keep ALL other training settings the same as `train_qwen_titans_babilong_v4.py`
8
+
9
+ Key design (mirrors v4 training script behavior):
10
+ 1. Freeze Qwen backbone EXCEPT embed_tokens (trainable for input adaptation)
11
+ 2. Untie lm_head from embed_tokens if they share weights
12
+ 3. Train: embed_tokens + lm_head
13
+ 4. Keep chunkwise_backward=False + full-sequence backward (cross-chunk graph)
14
+ 5. Keep gradient_checkpointing & manual gradient all-reduce strategy for multi-GPU
15
+ """
16
+
17
+ import os
18
+ import sys
19
+
20
+ # =============================================================================
21
+ # CRITICAL: Disable torchao BEFORE importing transformers to avoid version conflicts
22
+ # =============================================================================
23
+ os.environ["TRANSFORMERS_NO_TORCHAO"] = "1"
24
+
25
+
26
+ # Mock torchao to prevent import errors (same hack as v4)
27
+ class _MockTorchAO:
28
+ def __getattr__(self, name):
29
+ return _MockTorchAO()
30
+
31
+ def __call__(self, *args, **kwargs):
32
+ return _MockTorchAO()
33
+
34
+
35
+ sys.modules["torchao"] = _MockTorchAO()
36
+ sys.modules["torchao.quantization"] = _MockTorchAO()
37
+
38
+ import json
39
+ import math
40
+ import argparse
41
+ import logging
42
+ from contextlib import nullcontext
43
+ from dataclasses import dataclass, asdict
44
+ from typing import Optional, Dict, List
45
+
46
+ import torch
47
+ import torch.nn as nn
48
+ import torch.nn.functional as F
49
+ import torch.distributed as dist
50
+ from torch.utils.data import Dataset, DataLoader
51
+ from torch.optim import AdamW
52
+ from torch.optim.lr_scheduler import CosineAnnealingLR
53
+ from torch.nn.parallel import DistributedDataParallel as DDP
54
+ from tqdm import tqdm
55
+
56
+
57
+ logging.basicConfig(
58
+ level=logging.INFO,
59
+ format="%(asctime)s - %(levelname)s - %(message)s",
60
+ )
61
+ logger = logging.getLogger(__name__)
62
+
63
+
64
+ # =============================================================================
65
+ # Configuration (keep defaults identical to v4, except output_dir/use_memory)
66
+ # =============================================================================
67
+
68
+
69
@dataclass
class TrainingConfig:
    """Hyper-parameters for the baseline (no-Titans) BABILong run.

    Defaults intentionally mirror ``train_qwen_titans_babilong_v4.py`` so the
    only experimental difference from v4 is the removal of the memory modules.
    """

    # paths
    model_path: str = "/data/huangyifei/huggingface_cache/hub/models--Qwen--Qwen3-4B-Instruct-2507/snapshots/cdbee75f17c01a7cc42f958dc650907174af0554"
    data_path: str = "/data/yty/BABILong/babilong-train-5k-samples/data/qa1/32k.json"
    output_dir: str = "./outputs/qwen_baseline_babilong_v4"

    # training
    num_epochs: int = 10
    batch_size: int = 1
    gradient_accumulation_steps: int = 16
    max_grad_norm: float = 1.0

    # learning rates (keep v4 values)
    lr_embed: float = 1e-5  # Learning rate for embed_tokens
    lr_lm_head: float = 1e-4  # Learning rate for lm_head
    weight_decay: float = 0.01
    warmup_steps: int = 100

    # streaming (keep v4 values)
    chunk_size: int = 4096  # tokens per forward chunk of the 32k sequence

    # evaluation / logging
    eval_steps: int = 200
    eval_topk: int = 0
    logging_steps: int = 10
    log_every_batches: int = 80
    final_eval_print_examples: int = 10
    debug_data_samples: int = 0
    debug_label_batches: int = 0
    debug_eval_stats: bool = False
    debug_grad_norm: bool = False

    # precision
    bf16: bool = True
    fp16: bool = False
    use_tf32: bool = True
    gradient_checkpointing: bool = True
    chunkwise_backward: bool = False  # keep v4 default (full-sequence backward)

    # data
    max_length: int = 32768
    answer_reserve_tokens: int = 64
    label_prefix_tokens: int = 0
    max_samples: Optional[int] = 500

    # distributed
    use_fsdp: bool = False
    fsdp_use_orig_params: bool = True
    ddp_find_unused_parameters: bool = False

    # checkpoint
    save_full_checkpoint: bool = True
    final_ckpt_name: str = "final_memory_checkpoint.pt"  # keep same key layout for compatibility
    final_full_ckpt_name: str = "final_full_checkpoint.pt"

    seed: int = 42
126
+
127
+
128
+ # =============================================================================
129
+ # Dataset (identical to v4)
130
+ # =============================================================================
131
+
132
+
133
class BABILongDataset(Dataset):
    """Supervised BABILong QA dataset with answer-only labels.

    Each item is rendered as ``{input}\\n\\nQuestion: {question}\\nAnswer:``
    followed by the answer tokens. Only the answer span carries labels
    (everything else is -100) and every sample is right-padded to a fixed
    max_length so batches can be stacked without a dynamic collator.
    """

    def __init__(
        self,
        data_path: str,
        tokenizer,
        max_length: int = 32768,
        answer_reserve_tokens: int = 64,
        label_prefix_tokens: int = 0,
        max_samples: Optional[int] = None,
    ):
        """
        Args:
            data_path: JSON file holding a list of {input, question, target} dicts.
            tokenizer: HF tokenizer; called for encoding, exposes pad_token_id.
            max_length: fixed output length (prompt + answer + padding).
            answer_reserve_tokens: positions reserved so the answer is not
                truncated away by a max-length prompt.
            label_prefix_tokens: supervise this many prompt tokens immediately
                before the answer (0 = answer-only labels).
            max_samples: keep only the first N samples (None/0 = all).
        """
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.answer_reserve_tokens = answer_reserve_tokens
        self.label_prefix_tokens = int(label_prefix_tokens)

        logger.info(f"Loading dataset: {data_path}")
        with open(data_path, "r") as f:
            self.data = json.load(f)

        if max_samples:
            self.data = self.data[:max_samples]

        logger.info(f"Dataset size: {len(self.data)}")

    def __len__(self):
        """Number of samples after optional max_samples truncation."""
        return len(self.data)

    def __getitem__(self, idx):
        """Build one fixed-length example: padded ids, answer-only labels, mask."""
        item = self.data[idx]
        text = f"{item['input']}\n\nQuestion: {item['question']}\nAnswer:"
        target = item["target"]

        # NOTE(review): `or 0` also replaces a legitimate pad_token_id of 0 — confirm intended.
        pad_id = self.tokenizer.pad_token_id or 0
        reserve = int(self.answer_reserve_tokens)

        # Truncate the prompt so at least `reserve` positions remain for the answer.
        prompt_ids = self.tokenizer(
            text,
            max_length=max(self.max_length - reserve, 1),
            truncation=True,
            add_special_tokens=True,
            return_tensors="pt",
        ).input_ids.squeeze(0)

        # Leading space so the answer tokenizes as a word continuation, not a prefix.
        answer_ids = self.tokenizer(
            f" {target}",
            add_special_tokens=False,
            return_tensors="pt",
        ).input_ids.squeeze(0)

        # Clip the answer to whatever room the prompt actually left.
        available = max(self.max_length - prompt_ids.numel(), 0)
        answer_ids = answer_ids[:available]

        input_ids = torch.cat([prompt_ids, answer_ids], dim=0)[: self.max_length]

        # Labels: -100 everywhere except the answer span (plus optional prefix).
        labels = torch.full_like(input_ids, fill_value=-100)
        if answer_ids.numel() > 0:
            start = prompt_ids.numel()
            end = min(start + answer_ids.numel(), labels.numel())
            labels[start:end] = input_ids[start:end]
            if self.label_prefix_tokens > 0:
                # Also supervise up to N prompt tokens right before the answer.
                prefix = min(start, self.label_prefix_tokens)
                if prefix > 0:
                    labels[start - prefix : start] = input_ids[start - prefix : start]

        # Right-pad to the fixed max_length; mask distinguishes real vs pad tokens.
        seq_len = input_ids.numel()
        if seq_len < self.max_length:
            pad_len = self.max_length - seq_len
            input_ids = F.pad(input_ids, (0, pad_len), value=int(pad_id))
            labels = F.pad(labels, (0, pad_len), value=-100)
            attention_mask = torch.cat(
                [torch.ones(seq_len, dtype=torch.long), torch.zeros(pad_len, dtype=torch.long)],
                dim=0,
            )
        else:
            attention_mask = torch.ones(self.max_length, dtype=torch.long)

        return {
            "input_ids": input_ids.to(dtype=torch.long),
            "labels": labels.to(dtype=torch.long),
            "attention_mask": attention_mask,
        }
214
+
215
+
216
def collate_fn(batch):
    """Stack a list of per-sample tensor dicts into one batched dict (dim 0)."""
    stacked = {}
    for key in batch[0]:
        stacked[key] = torch.stack([sample[key] for sample in batch], dim=0)
    return stacked
219
+
220
+
221
+ # =============================================================================
222
+ # Baseline Model Wrapper (NO Titans)
223
+ # =============================================================================
224
+
225
+
226
class QwenBaselineForBABILongV4(nn.Module):
    """
    Baseline wrapper that mirrors v4's chunk streaming + loss computation,
    but WITHOUT any Titans memory integration.

    Trainable: embed_tokens + lm_head
    Frozen: all transformer layers
    """

    def __init__(self, qwen_model, config: TrainingConfig):
        """Wrap a HF causal LM and freeze its transformer backbone.

        Args:
            qwen_model: HF model assumed to expose ``.model`` (backbone with
                ``embed_tokens``) and ``.lm_head`` — Qwen-style layout.
            config: run hyper-parameters (chunk_size, chunkwise_backward, ...).
        """
        super().__init__()
        self.qwen = qwen_model
        self.config = config
        self.hidden_size = qwen_model.config.hidden_size
        self.num_attention_heads = qwen_model.config.num_attention_heads

        # Freeze transformer layers; keep embed_tokens + lm_head trainable.
        self._freeze_backbone()

        logger.info("[QwenBaselineForBABILongV4] Initialized (NO TITANS)")
        logger.info("Trainable: embed_tokens + lm_head | Frozen: transformer layers")
        logger.info(f" - hidden_size: {self.hidden_size}")
        logger.info(f" - num_attention_heads: {self.num_attention_heads}")
        logger.info(f" - chunk_size: {config.chunk_size}")
        logger.info(f" - chunkwise_backward: {config.chunkwise_backward}")

    def _freeze_backbone(self):
        """
        Freeze Qwen transformer layers, keep embed_tokens + lm_head trainable.
        Also untie lm_head from embed_tokens if they share weights (same as v4).
        """
        # Untie if tied (allows independent training & param grouping)
        if hasattr(self.qwen, "lm_head") and hasattr(self.qwen, "model") and hasattr(self.qwen.model, "embed_tokens"):
            lm_head_weight = self.qwen.lm_head.weight
            embed_weight = self.qwen.model.embed_tokens.weight
            try:
                # Identical storage pointer => the two modules share one tensor.
                has_tied_weights = lm_head_weight.data_ptr() == embed_weight.data_ptr()
            except Exception:
                has_tied_weights = False

            if has_tied_weights:
                logger.info("[baseline v4] Detected tied weights - untying lm_head from embed_tokens")
                # Clone into a fresh Linear so embed/lm_head can diverge and
                # receive different learning rates.
                new_lm_head = nn.Linear(
                    self.qwen.lm_head.in_features,
                    self.qwen.lm_head.out_features,
                    bias=self.qwen.lm_head.bias is not None,
                    device=lm_head_weight.device,
                    dtype=lm_head_weight.dtype,
                )
                with torch.no_grad():
                    new_lm_head.weight.copy_(lm_head_weight)
                    if self.qwen.lm_head.bias is not None and new_lm_head.bias is not None:
                        new_lm_head.bias.copy_(self.qwen.lm_head.bias)
                self.qwen.lm_head = new_lm_head
                logger.info(f"[baseline v4] Created independent lm_head: {new_lm_head.weight.shape}")

        # Freeze everything
        for _, p in self.named_parameters():
            p.requires_grad = False

        # Unfreeze embed_tokens
        if hasattr(self.qwen, "model") and hasattr(self.qwen.model, "embed_tokens"):
            for p in self.qwen.model.embed_tokens.parameters():
                p.requires_grad = True
        else:
            # Fallback for model layouts without a `.model.embed_tokens` path.
            emb = self.qwen.get_input_embeddings()
            if emb is not None:
                for p in emb.parameters():
                    p.requires_grad = True

        # Unfreeze lm_head (output embeddings)
        if hasattr(self.qwen, "lm_head"):
            for p in self.qwen.lm_head.parameters():
                p.requires_grad = True
        else:
            out_emb = self.qwen.get_output_embeddings()
            if out_emb is not None:
                for p in out_emb.parameters():
                    p.requires_grad = True

        # Log counts (counts are per named parameter tensor, not per element).
        frozen_count = 0
        trainable_count = 0
        embed_count = 0
        lm_head_count = 0
        for name, param in self.named_parameters():
            if param.requires_grad:
                trainable_count += 1
                if "embed_tokens" in name:
                    embed_count += 1
                    logger.info(f"[baseline v4] embed_tokens trainable: {name}")
                elif "lm_head" in name:
                    lm_head_count += 1
                    logger.info(f"[baseline v4] lm_head trainable: {name}")
            else:
                frozen_count += 1

        logger.info(f"[baseline v4] Frozen {frozen_count} parameters")
        logger.info(f"[baseline v4] Trainable {trainable_count} parameters (embed: {embed_count} + lm_head: {lm_head_count})")

    def _split_into_chunks(self, tensor: torch.Tensor, chunk_size: int):
        """Split (batch, seq) along dim 1 into (start, end, view) triples."""
        seq_len = tensor.shape[1]
        chunks = []
        for start in range(0, seq_len, chunk_size):
            end = min(start + chunk_size, seq_len)
            chunks.append((start, end, tensor[:, start:end]))
        return chunks

    def _process_chunk(
        self,
        chunk_ids: torch.Tensor,
        chunk_attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Run the frozen backbone on one chunk; return last hidden states.

        use_cache is disabled, so chunks are independent forwards — no KV
        cache is carried across chunk boundaries.
        """
        if hasattr(self.qwen, "model") and hasattr(self.qwen.model, "embed_tokens"):
            token_embeds = self.qwen.model.embed_tokens(chunk_ids)
        else:
            token_embeds = self.qwen.get_input_embeddings()(chunk_ids)

        outputs = self.qwen.model(
            inputs_embeds=token_embeds,
            attention_mask=chunk_attention_mask,
            use_cache=False,
            output_hidden_states=False,
            return_dict=True,
        )
        return outputs.last_hidden_state

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        return_pred_tokens: bool = False,
        topk: int = 0,
        chunk_start: Optional[int] = None,
        chunk_end: Optional[int] = None,
        reset_mem_state: bool = False,  # kept for Trainer API compatibility
    ) -> Dict[str, torch.Tensor]:
        """Streamed forward over fixed-size chunks with token-mean CE loss.

        Returns a dict with "loss"; optionally adds padded "pred_ids" /
        "target_ids" / "target_lengths" (argmax predictions at supervised
        positions) and "topk_correct" / "topk_total" counters.

        When chunk_start/chunk_end are given, delegates to
        ``_forward_single_chunk`` (chunkwise-backward mode) and returns
        {"loss_sum", "loss_tokens", "has_grad"} instead.
        """
        # Single chunk forward (for chunkwise backward)
        if chunk_start is not None or chunk_end is not None:
            start = 0 if chunk_start is None else int(chunk_start)
            end = int(chunk_end) if chunk_end is not None else None
            return self._forward_single_chunk(
                input_ids=input_ids,
                attention_mask=attention_mask,
                labels=labels,
                chunk_start=start,
                chunk_end=end,
            )

        # Full sequence forward (streaming chunks)
        batch_size, _ = input_ids.shape
        chunk_size = self.config.chunk_size
        chunks = self._split_into_chunks(input_ids, chunk_size)

        # Sum-reduction CE, normalized by the global supervised-token count at
        # the end, so the loss is a true mean across all chunks.
        loss_fct_sum = nn.CrossEntropyLoss(reduction="sum")
        total_loss_sum = None
        total_loss_tokens = 0

        topk_correct = None
        topk_total = None

        pred_tokens_by_sample: List[List[int]] = [[] for _ in range(batch_size)]
        target_tokens_by_sample: List[List[int]] = [[] for _ in range(batch_size)]

        if topk and topk > 0:
            device = input_ids.device
            topk_correct = torch.tensor(0.0, device=device, dtype=torch.float32)
            topk_total = torch.tensor(0.0, device=device, dtype=torch.float32)

        for start, end, _ in chunks:
            # Overlap one token to the left so the first label of each chunk
            # still has a previous-position hidden state after the shift.
            proc_start = max(0, start - 1)
            chunk_ids = input_ids[:, proc_start:end]
            chunk_labels = labels[:, proc_start:end] if labels is not None else None
            chunk_mask = attention_mask[:, proc_start:end] if attention_mask is not None else None

            hidden_full = self._process_chunk(chunk_ids, chunk_mask)

            if chunk_labels is not None and (chunk_labels != -100).any():
                chunk_labels_local = chunk_labels.to(device=hidden_full.device)
                # Standard causal shift: hidden at t predicts label at t+1.
                shift_hidden = hidden_full[:, :-1, :].contiguous()
                shift_labels = chunk_labels_local[:, 1:].contiguous()

                valid = shift_labels != -100
                if valid.any():
                    # Run lm_head only on supervised positions to save memory.
                    hs = shift_hidden[valid]
                    targets = shift_labels[valid]

                    # Sanitize non-finite activations/logits before the loss.
                    hs = torch.nan_to_num(hs.float(), nan=0.0, posinf=0.0, neginf=0.0)
                    logits = self.qwen.lm_head(hs)
                    logits = logits.float()
                    logits = torch.nan_to_num(logits, nan=0.0, posinf=0.0, neginf=0.0)
                    targets = targets.to(device=logits.device)

                    chunk_loss_sum = loss_fct_sum(logits, targets)
                    total_loss_sum = chunk_loss_sum if total_loss_sum is None else (total_loss_sum + chunk_loss_sum)
                    total_loss_tokens += targets.numel()

                    if topk and topk > 0:
                        k = min(int(topk), logits.shape[-1])
                        topk_ids = torch.topk(logits, k=k, dim=-1).indices
                        correct = (topk_ids == targets.unsqueeze(-1)).any(dim=-1)
                        topk_correct = topk_correct + correct.float().sum()
                        topk_total = topk_total + torch.tensor(float(targets.numel()), device=topk_total.device)

                    if return_pred_tokens:
                        # Group flat predictions back per batch row via the
                        # batch index of each valid position.
                        idx = valid.nonzero(as_tuple=False)
                        pred_flat = torch.argmax(logits, dim=-1).detach().to("cpu", dtype=torch.long).tolist()
                        tgt_flat = targets.detach().to("cpu", dtype=torch.long).tolist()
                        b_idx_flat = idx[:, 0].detach().to("cpu", dtype=torch.long).tolist()

                        for i, b_idx in enumerate(b_idx_flat):
                            pred_tokens_by_sample[b_idx].append(int(pred_flat[i]))
                            target_tokens_by_sample[b_idx].append(int(tgt_flat[i]))

        if total_loss_sum is None or total_loss_tokens == 0:
            # No supervised tokens anywhere: return a zero loss (no grad path).
            device = next(self.qwen.parameters()).device
            loss = torch.zeros((), device=device, dtype=torch.float32)
        else:
            loss = total_loss_sum / total_loss_tokens

        out: Dict[str, torch.Tensor] = {"loss": loss}

        if return_pred_tokens:
            # Pack ragged per-sample id lists into -1-padded matrices.
            lengths = torch.tensor([len(x) for x in target_tokens_by_sample], dtype=torch.long)
            max_len = int(lengths.max().item()) if lengths.numel() > 0 else 0
            if max_len > 0:
                pred_mat = torch.full((batch_size, max_len), -1, dtype=torch.long)
                tgt_mat = torch.full((batch_size, max_len), -1, dtype=torch.long)
                for b in range(batch_size):
                    L = int(lengths[b].item())
                    if L > 0:
                        pred_mat[b, :L] = torch.tensor(pred_tokens_by_sample[b], dtype=torch.long)
                        tgt_mat[b, :L] = torch.tensor(target_tokens_by_sample[b], dtype=torch.long)
            else:
                pred_mat = torch.empty((batch_size, 0), dtype=torch.long)
                tgt_mat = torch.empty((batch_size, 0), dtype=torch.long)
            out["pred_ids"] = pred_mat
            out["target_ids"] = tgt_mat
            out["target_lengths"] = lengths

        if topk and topk > 0 and topk_correct is not None and topk_total is not None:
            out["topk_correct"] = topk_correct
            out["topk_total"] = topk_total

        return out

    def _forward_single_chunk(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor],
        labels: Optional[torch.Tensor],
        chunk_start: int,
        chunk_end: Optional[int],
    ) -> Dict[str, torch.Tensor]:
        """Forward one chunk only and return its sum-loss (chunkwise backward).

        The caller normalizes loss_sum by the global token count and scales
        before backward; this keeps the per-chunk graph small.
        """
        seq_len = input_ids.shape[1]
        end = chunk_end if chunk_end is not None else min(chunk_start + self.config.chunk_size, seq_len)
        end = min(int(end), seq_len)
        start = max(0, int(chunk_start))

        # Same one-token left overlap as the full-sequence path.
        proc_start = max(0, start - 1)
        chunk_ids = input_ids[:, proc_start:end]
        chunk_labels = labels[:, proc_start:end] if labels is not None else None
        chunk_mask = attention_mask[:, proc_start:end] if attention_mask is not None else None

        hidden_full = self._process_chunk(chunk_ids, chunk_mask)

        loss_fct_sum = nn.CrossEntropyLoss(reduction="sum")
        total_loss_sum = None
        total_loss_tokens = 0

        if chunk_labels is not None and (chunk_labels != -100).any():
            chunk_labels_local = chunk_labels.to(device=hidden_full.device)
            shift_hidden = hidden_full[:, :-1, :].contiguous()
            shift_labels = chunk_labels_local[:, 1:].contiguous()

            valid = shift_labels != -100
            if valid.any():
                hs = shift_hidden[valid]
                targets = shift_labels[valid]

                hs = torch.nan_to_num(hs.float(), nan=0.0, posinf=0.0, neginf=0.0)
                logits = self.qwen.lm_head(hs)
                logits = logits.float()
                logits = torch.nan_to_num(logits, nan=0.0, posinf=0.0, neginf=0.0)
                targets = targets.to(device=logits.device)

                total_loss_sum = loss_fct_sum(logits, targets)
                total_loss_tokens = targets.numel()

        if total_loss_sum is None:
            # Zero-valued but graph-connected loss so backward() is always legal.
            total_loss_sum = hidden_full.float().sum() * 0.0

        return {
            "loss_sum": total_loss_sum,
            "loss_tokens": total_loss_tokens,
            "has_grad": True,
        }

    def get_param_groups(self, config: TrainingConfig):
        """Build AdamW param groups: embed_tokens (lr_embed) and lm_head (lr_lm_head)."""
        # Prefer module-based collection for robustness (name patterns can vary across model code)
        embed_mod = None
        if hasattr(self.qwen, "model") and hasattr(self.qwen.model, "embed_tokens"):
            embed_mod = self.qwen.model.embed_tokens
        else:
            embed_mod = self.qwen.get_input_embeddings()

        head_mod = self.qwen.lm_head if hasattr(self.qwen, "lm_head") else self.qwen.get_output_embeddings()

        embed_params = [p for p in (embed_mod.parameters() if embed_mod is not None else []) if p.requires_grad]
        lm_head_params = [p for p in (head_mod.parameters() if head_mod is not None else []) if p.requires_grad]

        # De-duplicate defensively (in case of unexpected weight tying)
        seen = set()

        def _uniq(params):
            # Keep first occurrence of each tensor object (by identity).
            out = []
            for p in params:
                pid = id(p)
                if pid in seen:
                    continue
                out.append(p)
                seen.add(pid)
            return out

        embed_params = _uniq(embed_params)
        lm_head_params = _uniq(lm_head_params)

        param_groups = []
        if len(embed_params) > 0:
            param_groups.append(
                {
                    "params": embed_params,
                    "lr": config.lr_embed,
                    "weight_decay": config.weight_decay,
                    "name": "embed_tokens",
                }
            )
        if len(lm_head_params) > 0:
            param_groups.append(
                {
                    "params": lm_head_params,
                    "lr": config.lr_lm_head,
                    "weight_decay": config.weight_decay,
                    "name": "lm_head",
                }
            )

        logger.info(f"[baseline v4 Param groups] embed_tokens={len(embed_params)}, lm_head={len(lm_head_params)}")
        return param_groups
575
+
576
+
577
+ # =============================================================================
578
+ # Distributed Training Utilities (copied from v4)
579
+ # =============================================================================
580
+
581
+
582
def init_distributed() -> tuple:
    """Initialize the NCCL process group from torchrun environment variables.

    Returns (is_distributed, rank, local_rank, world_size). Falls back to
    single-process defaults when RANK/WORLD_SIZE are not set.

    Raises:
        RuntimeError: if env vars request distributed mode but
            torch.distributed is unavailable.
    """
    env = os.environ
    if not ("RANK" in env and "WORLD_SIZE" in env):
        return False, 0, 0, 1

    rank, world_size = int(env["RANK"]), int(env["WORLD_SIZE"])
    local_rank = int(env.get("LOCAL_RANK", 0))

    if not dist.is_available():
        raise RuntimeError("torch.distributed not available")

    if not dist.is_initialized():
        dist.init_process_group(backend="nccl", init_method="env://")

    # Bind this process to its local GPU before any collective runs.
    torch.cuda.set_device(local_rank)
    return True, rank, local_rank, world_size
598
+
599
+
600
def cleanup_distributed():
    """Destroy the process group after a final barrier; no-op when not initialized."""
    if not (dist.is_available() and dist.is_initialized()):
        return
    dist.barrier()
    dist.destroy_process_group()
604
+
605
+
606
def unwrap_model(model: nn.Module) -> nn.Module:
    """Return the raw module beneath a DDP (`.module`) or nested FSDP wrapper.

    Returns the model itself when no known wrapper attribute is present.
    """
    if hasattr(model, "module"):
        return model.module

    inner = getattr(model, "_fsdp_wrapped_module", None)
    if inner is not None and hasattr(inner, "module"):
        return inner.module

    return model
614
+
615
+
616
def is_fsdp_model(model: nn.Module) -> bool:
    """Return True iff *model* is an FSDP wrapper; False when FSDP can't be imported."""
    try:
        from torch.distributed.fsdp import FullyShardedDataParallel as _FSDP

        return isinstance(model, _FSDP)
    except Exception:
        return False
623
+
624
+
625
def manual_all_reduce_gradients(model: nn.Module, world_size: int) -> None:
    """
    Manually average gradients across GPUs without DDP.

    All existing grads are packed into a single flat buffer, all-reduced once,
    divided by world_size, then scattered back in place. No-op for
    single-process runs or when no parameter carries a gradient. Kept
    behaviorally identical to v4 to match the multi-GPU strategy under
    cross-chunk graphs.
    """
    if world_size <= 1:
        return

    grads = [p.grad for p in model.parameters() if p.grad is not None]
    if not grads:
        return

    # One fused buffer => one collective instead of one per tensor.
    flat = torch.zeros(
        sum(g.numel() for g in grads),
        dtype=grads[0].dtype,
        device=grads[0].device,
    )

    cursor = 0
    for g in grads:
        n = g.numel()
        flat[cursor : cursor + n] = g.view(-1)
        cursor += n

    dist.all_reduce(flat, op=dist.ReduceOp.SUM)
    flat.div_(world_size)

    cursor = 0
    for g in grads:
        n = g.numel()
        g.copy_(flat[cursor : cursor + n].view_as(g))
        cursor += n
662
+
663
+
664
+ # =============================================================================
665
+ # Trainer (same logic as v4; model provides get_param_groups + forward API)
666
+ # =============================================================================
667
+
668
+
669
+ class Trainer:
670
+ def __init__(
671
+ self,
672
+ model: nn.Module,
673
+ train_dataloader: DataLoader,
674
+ eval_dataloader: DataLoader,
675
+ config: TrainingConfig,
676
+ rank: int = 0,
677
+ world_size: int = 1,
678
+ is_distributed: bool = False,
679
+ tokenizer=None,
680
+ use_manual_grad_sync: bool = False,
681
+ ):
682
+ self.model = model
683
+ self.train_dataloader = train_dataloader
684
+ self.eval_dataloader = eval_dataloader
685
+ self.config = config
686
+ self.device = next(model.parameters()).device
687
+ self.rank = rank
688
+ self.world_size = world_size
689
+ self.is_distributed = is_distributed
690
+ self.is_main_process = rank == 0
691
+ self.tokenizer = tokenizer
692
+ self.use_manual_grad_sync = use_manual_grad_sync
693
+
694
+ base_model = unwrap_model(self.model)
695
+ param_groups = base_model.get_param_groups(config)
696
+ self.optimizer = AdamW(param_groups)
697
+
698
+ total_steps = math.ceil(
699
+ (len(train_dataloader) * config.num_epochs) / max(config.gradient_accumulation_steps, 1)
700
+ )
701
+ self.scheduler = CosineAnnealingLR(self.optimizer, T_max=total_steps, eta_min=1e-7)
702
+
703
+ self.scaler = torch.cuda.amp.GradScaler(enabled=config.fp16)
704
+ self.global_step = 0
705
+
706
+ def _get_group_lr(self, group_name: str) -> Optional[float]:
707
+ for group in self.optimizer.param_groups:
708
+ if group.get("name") == group_name:
709
+ return group.get("lr")
710
+ return None
711
+
712
    def train(self):
        """Run the full training loop (mirrors v4).

        Per epoch: micro-batches are accumulated over `gradient_accumulation_steps`
        before each optimizer/scheduler step; `chunkwise_backward` optionally
        runs backward per chunk to cap activation memory. Periodic and
        per-epoch evals are logged, and a final checkpoint is saved.
        """
        self.model.train()
        if self.is_main_process:
            logger.info("=" * 60)
            logger.info("Starting baseline v4 training (NO TITANS, FROZEN backbone)")
            logger.info("=" * 60)

        last_epoch_loss = None
        for epoch in range(self.config.num_epochs):
            # Reshuffle DistributedSampler shards each epoch.
            sampler = getattr(self.train_dataloader, "sampler", None)
            if sampler is not None and hasattr(sampler, "set_epoch"):
                sampler.set_epoch(epoch)
            if self.is_main_process:
                logger.info(f"Epoch {epoch + 1}/{self.config.num_epochs}")

            epoch_loss = 0.0
            num_batches = 0

            pbar = self.train_dataloader
            if self.is_main_process:
                pbar = tqdm(
                    self.train_dataloader,
                    desc=f"Epoch {epoch + 1}/{self.config.num_epochs}",
                    leave=False,
                    dynamic_ncols=True,
                )

            for step, batch in enumerate(pbar):
                batch = {k: v.to(self.device) for k, v in batch.items()}

                ga = max(self.config.gradient_accumulation_steps, 1)
                # Optimizer step fires only on the last micro-batch of each GA window.
                sync_gradients = ((step + 1) % ga == 0)
                amp_enabled = self.config.fp16 or self.config.bf16
                amp_dtype = torch.float16 if self.config.fp16 else torch.bfloat16

                with torch.amp.autocast(device_type=self.device.type, enabled=amp_enabled, dtype=amp_dtype):
                    if self.config.chunkwise_backward:
                        labels = batch.get("labels")
                        if labels is not None:
                            # Global supervised-token count: lets each chunk's
                            # sum-loss be normalized to a true sequence mean.
                            total_tokens = int((labels[:, 1:] != -100).sum().item())
                        else:
                            total_tokens = 0
                        loss_scale = 0.0 if total_tokens == 0 else (1.0 / total_tokens / ga)

                        seq_len = batch["input_ids"].shape[1]
                        chunk_size = int(self.config.chunk_size)
                        chunk_ranges = [
                            (start, min(start + chunk_size, seq_len)) for start in range(0, seq_len, chunk_size)
                        ]
                        raw_loss_sum = None

                        for idx, (start, end) in enumerate(chunk_ranges):
                            is_last_chunk = idx == (len(chunk_ranges) - 1)
                            sync_chunk = sync_gradients and is_last_chunk

                            # DDP only: suppress gradient bucketing on every
                            # backward except the last one of the GA window.
                            use_no_sync = (
                                self.is_distributed
                                and not sync_chunk
                                and not self.use_manual_grad_sync
                                and hasattr(self.model, "no_sync")
                            )
                            chunk_ctx = self.model.no_sync if use_no_sync else nullcontext
                            with chunk_ctx():
                                outputs = self.model(
                                    input_ids=batch["input_ids"],
                                    attention_mask=batch["attention_mask"],
                                    labels=labels,
                                    chunk_start=start,
                                    chunk_end=end,
                                    reset_mem_state=(idx == 0),
                                )
                                chunk_loss_sum = outputs["loss_sum"]
                                raw_loss_sum = chunk_loss_sum.detach() if raw_loss_sum is None else (raw_loss_sum + chunk_loss_sum.detach())

                                # Backward per chunk; scale folds in both the
                                # token-mean normalization and the GA divisor.
                                scaled_loss = chunk_loss_sum * float(loss_scale)
                                if self.config.fp16:
                                    self.scaler.scale(scaled_loss).backward()
                                else:
                                    scaled_loss.backward()

                        if raw_loss_sum is None or total_tokens == 0:
                            raw_loss = torch.zeros((), device=self.device, dtype=torch.float32)
                        else:
                            raw_loss = raw_loss_sum / total_tokens
                        loss = raw_loss / ga
                    else:
                        # Full-sequence forward/backward (v4 default path).
                        use_no_sync = (
                            self.is_distributed
                            and not sync_gradients
                            and not self.use_manual_grad_sync
                            and hasattr(self.model, "no_sync")
                        )
                        ctx = self.model.no_sync if use_no_sync else nullcontext
                        with ctx():
                            outputs = self.model(
                                input_ids=batch["input_ids"],
                                attention_mask=batch["attention_mask"],
                                labels=batch["labels"],
                            )
                            raw_loss = outputs["loss"]
                            loss = raw_loss / ga

                            if self.config.fp16:
                                self.scaler.scale(loss).backward()
                            else:
                                loss.backward()

                # Track the un-divided loss for human-readable epoch averages.
                epoch_loss += raw_loss.detach().float().item()
                num_batches += 1

                if sync_gradients:
                    grad_norm = None

                    if self.use_manual_grad_sync and self.world_size > 1:
                        # Unscale before the manual all-reduce so averaged
                        # grads are in real (unscaled) units.
                        if self.config.fp16:
                            self.scaler.unscale_(self.optimizer)
                        manual_all_reduce_gradients(self.model, self.world_size)

                    if self.config.fp16:
                        if not self.use_manual_grad_sync:
                            self.scaler.unscale_(self.optimizer)
                        grad_norm = torch.nn.utils.clip_grad_norm_(
                            self.model.parameters(), self.config.max_grad_norm
                        )
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                    else:
                        grad_norm = torch.nn.utils.clip_grad_norm_(
                            self.model.parameters(), self.config.max_grad_norm
                        )
                        self.optimizer.step()

                    self.scheduler.step()
                    self.optimizer.zero_grad(set_to_none=True)
                    self.global_step += 1

                    if self.is_main_process:
                        avg_loss = epoch_loss / max(num_batches, 1)
                        pbar.set_postfix({"gstep": self.global_step, "loss": f"{avg_loss:.4f}"})

                    if self.global_step % self.config.logging_steps == 0 and self.is_main_process:
                        lr_embed = self._get_group_lr("embed_tokens") or 0.0
                        lr_lm_head = self._get_group_lr("lm_head") or 0.0
                        grad_note = ""
                        if self.config.debug_grad_norm and grad_norm is not None:
                            grad_note = f" | grad_norm={float(grad_norm):.4f}"
                        logger.info(
                            f"Step {self.global_step} | loss={epoch_loss / max(num_batches, 1):.4f} | "
                            f"lr_embed={lr_embed:.2e} | lr_lm_head={lr_lm_head:.2e}{grad_note}"
                        )

                    if self.global_step % self.config.eval_steps == 0:
                        eval_metrics = self.evaluate()
                        if self.is_main_process:
                            # Clamp before exp so ppl stays finite if loss spikes.
                            ppl = float(math.exp(min(20.0, eval_metrics["loss"])))
                            logger.info(
                                f"[EVAL] loss={eval_metrics['loss']:.4f}, ppl={ppl:.3f}, "
                                f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                                f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
                            )
                        self.model.train()

            avg_epoch_loss = epoch_loss / max(num_batches, 1)
            if self.is_distributed:
                # Average per-rank epoch means into a single global number.
                t = torch.tensor(avg_epoch_loss, device=self.device, dtype=torch.float32)
                dist.all_reduce(t, op=dist.ReduceOp.SUM)
                avg_epoch_loss = (t / self.world_size).item()

            if self.is_main_process:
                logger.info(f"Epoch {epoch + 1} done, avg loss={avg_epoch_loss:.4f}")
            last_epoch_loss = avg_epoch_loss

            eval_metrics = self.evaluate()
            if self.is_main_process:
                logger.info(
                    f"[EPOCH {epoch + 1} EVAL] "
                    f"eval_loss={eval_metrics['loss']:.4f}, "
                    f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                    f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
                )
                self._append_eval_metrics(
                    eval_metrics,
                    phase="epoch",
                    epoch=int(epoch + 1),
                    train_avg_loss=avg_epoch_loss,
                )
            self.model.train()

        if self.is_main_process:
            logger.info("Training done, final evaluation")

        final_eval = self.evaluate(print_examples=int(self.config.final_eval_print_examples))
        if self.is_main_process:
            ppl = float(math.exp(min(20.0, final_eval["loss"])))
            logger.info(
                f"[FINAL EVAL] loss={final_eval['loss']:.4f}, ppl={ppl:.3f}, "
                f"em_acc={final_eval['em_acc'] * 100:.2f}%, "
                f"tok_acc={final_eval['tok_acc'] * 100:.2f}%"
            )
            logger.info("Saving final checkpoint")

        # _append_eval_metrics is rank-0-guarded internally; checkpoint saving
        # is invoked from all ranks.
        self._append_eval_metrics(
            final_eval,
            phase="final",
            epoch=int(self.config.num_epochs),
            train_avg_loss=last_epoch_loss,
        )
        self.save_final_checkpoint()
920
+
921
    @torch.no_grad()
    def evaluate(self, print_examples: int = 0) -> Dict[str, float]:
        """Evaluate on eval_dataloader.

        Returns {"loss", "tok_acc", "em_acc"}: mean finite batch loss,
        token-level accuracy over supervised positions, and exact-match
        accuracy on whitespace-stripped decoded answer spans.

        Args:
            print_examples: log up to this many decoded pred/label pairs
                (main process only).
        """
        self.model.eval()
        # Accumulators live on-device so they can be all-reduced directly.
        total_loss = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_batches = torch.tensor(0.0, device=self.device, dtype=torch.float32)

        total_tok_correct = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_tok_total = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_em_correct = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_em_total = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        printed = 0

        for batch in self.eval_dataloader:
            batch = {k: v.to(self.device) for k, v in batch.items()}
            amp_enabled = self.config.fp16 or self.config.bf16
            amp_dtype = torch.float16 if self.config.fp16 else torch.bfloat16
            with torch.amp.autocast(device_type=self.device.type, enabled=amp_enabled, dtype=amp_dtype):
                outputs = self.model(
                    input_ids=batch["input_ids"],
                    attention_mask=batch["attention_mask"],
                    labels=batch["labels"],
                    return_pred_tokens=True,
                    topk=int(self.config.eval_topk) if self.config.eval_topk else 0,
                )

            # Skip non-finite losses so one bad batch cannot poison the average.
            if torch.isfinite(outputs["loss"]):
                total_loss += outputs["loss"].detach().float()
                total_batches += 1.0

            pred_ids = outputs.get("pred_ids", None)
            target_ids = outputs.get("target_ids", None)
            lengths = outputs.get("target_lengths", None)

            # Defensive shape checks before trusting the prediction matrices.
            if (
                pred_ids is not None
                and target_ids is not None
                and lengths is not None
                and pred_ids.ndim == 2
                and target_ids.ndim == 2
                and lengths.ndim == 1
                and pred_ids.shape == target_ids.shape
                and pred_ids.shape[0] == lengths.shape[0]
            ):
                pred_cpu = pred_ids.to("cpu", dtype=torch.long)
                tgt_cpu = target_ids.to("cpu", dtype=torch.long)
                len_cpu = lengths.to("cpu", dtype=torch.long)

                for i in range(int(len_cpu.shape[0])):
                    # Per-sample valid length; rows are -1-padded beyond L.
                    L = int(len_cpu[i].item())
                    if L <= 0:
                        continue
                    p = pred_cpu[i, :L]
                    t = tgt_cpu[i, :L]

                    total_tok_correct += torch.tensor(
                        float((p == t).sum().item()), device=self.device, dtype=torch.float32
                    )
                    total_tok_total += torch.tensor(float(L), device=self.device, dtype=torch.float32)

                    if self.tokenizer is not None:
                        # EM compares whitespace-stripped decoded text.
                        pred_text = self.tokenizer.decode(p.tolist(), skip_special_tokens=True).strip()
                        tgt_text = self.tokenizer.decode(t.tolist(), skip_special_tokens=True).strip()
                        em = float(pred_text == tgt_text)
                        total_em_correct += torch.tensor(em, device=self.device, dtype=torch.float32)
                        total_em_total += torch.tensor(1.0, device=self.device, dtype=torch.float32)

                        if self.is_main_process and printed < print_examples:
                            logger.info(
                                f"[EVAL SAMPLE] pred={repr(pred_text)} | label={repr(tgt_text)} | match={bool(em)}"
                            )
                            printed += 1

        if self.is_distributed:
            dist.all_reduce(total_loss, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_batches, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_tok_correct, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_tok_total, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_em_correct, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_em_total, op=dist.ReduceOp.SUM)

        # clamp(min=1) guards against divide-by-zero on empty eval sets.
        avg_loss = (total_loss / total_batches.clamp(min=1.0)).item()
        tok_acc = (total_tok_correct / total_tok_total.clamp(min=1.0)).item()
        em_acc = (total_em_correct / total_em_total.clamp(min=1.0)).item()

        return {"loss": avg_loss, "tok_acc": tok_acc, "em_acc": em_acc}
1006
+
1007
+ def _append_eval_metrics(
1008
+ self,
1009
+ metrics: Dict[str, float],
1010
+ *,
1011
+ phase: str,
1012
+ epoch: Optional[int],
1013
+ train_avg_loss: Optional[float],
1014
+ ) -> None:
1015
+ if not self.is_main_process:
1016
+ return
1017
+ os.makedirs(self.config.output_dir, exist_ok=True)
1018
+ record = {
1019
+ "phase": phase,
1020
+ "epoch": epoch,
1021
+ "global_step": int(self.global_step),
1022
+ "train_avg_loss": None if train_avg_loss is None else float(train_avg_loss),
1023
+ "eval_loss": float(metrics.get("loss", 0.0)),
1024
+ "em_acc_pct": float(metrics.get("em_acc", 0.0) * 100.0),
1025
+ "tok_acc_pct": float(metrics.get("tok_acc", 0.0) * 100.0),
1026
+ }
1027
+ metrics_path = os.path.join(self.config.output_dir, "eval_metrics.jsonl")
1028
+ with open(metrics_path, "a") as f:
1029
+ f.write(json.dumps(record) + "\n")
1030
+
1031
def save_final_checkpoint(self):
    """Persist trainable weights (and optionally the full model) to output_dir.

    The trainable checkpoint keeps the historical "memory_state_dict" key so
    that existing v4 tooling can load it unchanged.  Under FSDP, rank 0
    gathers the full state dict before filtering/saving; all ranks barrier
    after each save.
    """
    ckpt_path = os.path.join(self.config.output_dir, self.config.final_ckpt_name)
    base_model = unwrap_model(self.model)

    def _is_trainable_head(name: str) -> bool:
        # Only the embedding table and LM head are trainable in this setup.
        return ("embed_tokens" in name) or ("lm_head" in name)

    trainable_sd = {
        name: param.detach().cpu()
        for name, param in base_model.named_parameters()
        if param.requires_grad and _is_trainable_head(name)
    }

    # FSDP flat-params can hide the named heads from named_parameters();
    # fall back to a rank0-gathered full state dict and filter it by name.
    if is_fsdp_model(self.model) and not trainable_sd:
        from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig

        gather_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
        with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, gather_cfg):
            gathered = self.model.state_dict()
        trainable_sd = {k: v for k, v in gathered.items() if _is_trainable_head(k)}

    if self.is_main_process:
        torch.save(
            {"memory_state_dict": trainable_sd, "global_step": self.global_step, "config": asdict(self.config)},
            ckpt_path,
        )
        logger.info(f"Saved trainable checkpoint: {ckpt_path}")
    if self.is_distributed:
        dist.barrier()

    if not self.config.save_full_checkpoint:
        return

    # Optionally also save the complete model state dict.
    full_ckpt_path = os.path.join(self.config.output_dir, self.config.final_full_ckpt_name)
    if is_fsdp_model(self.model):
        from torch.distributed.fsdp import (
            FullyShardedDataParallel as FSDP,
            StateDictType,
            FullStateDictConfig,
        )

        gather_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
        with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, gather_cfg):
            full_sd = self.model.state_dict()
    else:
        full_sd = unwrap_model(self.model).state_dict()

    if self.is_main_process:
        torch.save(
            {"model_state_dict": full_sd, "global_step": self.global_step, "config": asdict(self.config)},
            full_ckpt_path,
        )
        logger.info(f"Saved full checkpoint: {full_ckpt_path}")
    if self.is_distributed:
        dist.barrier()
1082
+
1083
+
1084
+ # =============================================================================
1085
+ # Main
1086
+ # =============================================================================
1087
+
1088
+
1089
def _apply_cli_overrides(config, args) -> None:
    """Copy CLI-provided values onto *config*; None/False means keep the default."""
    if args.fsdp:
        config.use_fsdp = True
    if args.max_samples is not None:
        config.max_samples = args.max_samples
    if args.max_length is not None:
        config.max_length = int(args.max_length)
    if args.output_dir is not None:
        config.output_dir = args.output_dir
    if args.num_epochs is not None:
        config.num_epochs = args.num_epochs
    if args.eval_steps is not None:
        config.eval_steps = args.eval_steps
    if args.batch_size is not None:
        config.batch_size = int(args.batch_size)
    if args.gradient_accumulation_steps is not None:
        config.gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    if args.chunk_size is not None:
        config.chunk_size = int(args.chunk_size)
    if args.gradient_checkpointing:
        config.gradient_checkpointing = True
    if args.no_chunkwise_backward:
        config.chunkwise_backward = False
    if args.lr_embed is not None:
        config.lr_embed = float(args.lr_embed)
    if args.lr_lm_head is not None:
        config.lr_lm_head = float(args.lr_lm_head)
    if args.debug_grad_norm:
        config.debug_grad_norm = True


def _disable_optional_backends(is_main: bool) -> None:
    """Monkeypatch transformers to skip flash-attn / torchao / torchvision detection."""
    try:
        import transformers
        from transformers.utils import import_utils as _import_utils

        def _disabled(*args, **kwargs):
            return False

        _import_utils.is_flash_attn_2_available = _disabled
        if hasattr(transformers, "utils") and hasattr(transformers.utils, "is_flash_attn_2_available"):
            transformers.utils.is_flash_attn_2_available = _disabled
        if hasattr(_import_utils, "is_torchao_available"):
            _import_utils.is_torchao_available = _disabled
        if hasattr(_import_utils, "is_torchvision_available"):
            _import_utils.is_torchvision_available = _disabled
    except Exception as e:
        # Best-effort: patching failures only cost us the fallback behavior.
        if is_main:
            logger.warning(f"Disable checks failed (ignored): {e}")


def main():
    """CLI entry point: build data/model/trainer, then run training or eval-only."""
    from transformers import AutoModelForCausalLM, AutoTokenizer

    parser = argparse.ArgumentParser(description="Qwen3 baseline v4 (NO TITANS) - Frozen Backbone Training")
    parser.add_argument("--fsdp", action="store_true")
    parser.add_argument("--eval_only", action="store_true")
    parser.add_argument("--ckpt_path", type=str, default=None)
    parser.add_argument("--max_samples", type=int, default=None)
    parser.add_argument("--max_length", type=int, default=None)
    parser.add_argument("--output_dir", type=str, default=None)
    parser.add_argument("--num_epochs", type=int, default=None)
    parser.add_argument("--eval_steps", type=int, default=None)
    parser.add_argument("--batch_size", type=int, default=None)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=None)
    parser.add_argument("--chunk_size", type=int, default=None)
    parser.add_argument("--gradient_checkpointing", action="store_true")
    parser.add_argument("--no_chunkwise_backward", action="store_true")
    parser.add_argument("--lr_embed", type=float, default=None)
    parser.add_argument("--lr_lm_head", type=float, default=None)
    parser.add_argument("--debug_grad_norm", action="store_true")
    args = parser.parse_args()

    config = TrainingConfig()
    _apply_cli_overrides(config, args)

    is_distributed, rank, local_rank, world_size = init_distributed()
    is_main = rank == 0

    # Chunkwise backward keeps autograd graphs alive across chunks, which FSDP
    # cannot tolerate.
    if config.use_fsdp and config.chunkwise_backward:
        if is_main:
            logger.warning("chunkwise_backward is incompatible with FSDP; disabling it.")
        config.chunkwise_backward = False

    if is_distributed and (not config.use_fsdp):
        if not config.ddp_find_unused_parameters:
            config.ddp_find_unused_parameters = True
            if is_main:
                logger.warning("Enabling DDP find_unused_parameters.")

    torch.manual_seed(config.seed + rank)

    if torch.cuda.is_available():
        device = torch.device(f"cuda:{local_rank}" if is_distributed else "cuda")
    else:
        device = torch.device("cpu")

    # Fall back to fp16 when the GPU lacks bf16 support.
    if torch.cuda.is_available() and config.bf16:
        try:
            bf16_ok = torch.cuda.is_bf16_supported()
        except Exception:
            bf16_ok = False
        if not bf16_ok:
            if is_main:
                logger.warning("bf16 not supported; falling back to fp16.")
            config.bf16 = False
            if not config.fp16:
                config.fp16 = True

    if torch.cuda.is_available() and getattr(config, "use_tf32", False):
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True
        try:
            torch.set_float32_matmul_precision("high")
        except Exception:
            pass

    if is_main:
        logger.info("=" * 70)
        logger.info("Qwen3-4B baseline v4 Training (NO TITANS, FROZEN BACKBONE)")
        logger.info("=" * 70)
        logger.info(f"distributed={is_distributed}, world_size={world_size}")
        logger.info(f"model_path={config.model_path}")
        logger.info(f"data_path={config.data_path}")
        logger.info(f"output_dir={config.output_dir}")
        logger.info(f"max_samples={config.max_samples}")
        logger.info(f"max_length={config.max_length}")
        logger.info(f"num_epochs={config.num_epochs}")
        logger.info(f"chunk_size={config.chunk_size}")
        logger.info("Trainable: embed_tokens + lm_head")
        logger.info("=" * 70)

    tokenizer = AutoTokenizer.from_pretrained(config.model_path, trust_remote_code=True)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    _disable_optional_backends(is_main)

    torch_dtype = torch.bfloat16 if config.bf16 else (torch.float16 if config.fp16 else torch.float32)

    qwen_model = AutoModelForCausalLM.from_pretrained(
        config.model_path,
        torch_dtype=torch_dtype,
        device_map=None,
        trust_remote_code=True,
        attn_implementation="sdpa",
        low_cpu_mem_usage=True,
    )
    qwen_model.to(device)
    qwen_model.config.use_cache = False
    if config.gradient_checkpointing and hasattr(qwen_model, "gradient_checkpointing_enable"):
        qwen_model.gradient_checkpointing_enable()

    full_dataset = BABILongDataset(
        config.data_path,
        tokenizer,
        max_length=config.max_length,
        answer_reserve_tokens=config.answer_reserve_tokens,
        label_prefix_tokens=config.label_prefix_tokens,
        max_samples=config.max_samples,
    )

    # 90/10 train/eval split with a fixed seed so every rank sees the same split.
    train_size = int(0.9 * len(full_dataset))
    train_dataset, eval_dataset = torch.utils.data.random_split(
        full_dataset,
        [train_size, len(full_dataset) - train_size],
        generator=torch.Generator().manual_seed(config.seed),
    )

    train_sampler = None
    eval_sampler = None
    if is_distributed:
        from torch.utils.data.distributed import DistributedSampler

        train_sampler = DistributedSampler(
            train_dataset, num_replicas=world_size, rank=rank, shuffle=True, seed=config.seed
        )
        eval_sampler = DistributedSampler(eval_dataset, num_replicas=world_size, rank=rank, shuffle=False)

    train_dataloader = DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=(train_sampler is None),
        sampler=train_sampler,
        collate_fn=collate_fn,
        num_workers=0,
    )
    eval_dataloader = DataLoader(
        eval_dataset,
        batch_size=config.batch_size,
        shuffle=False,
        sampler=eval_sampler,
        collate_fn=collate_fn,
        num_workers=0,
    )

    model = QwenBaselineForBABILongV4(qwen_model, config)
    model.to(device)

    # Multi-GPU strategy: cross-chunk autograd graphs break DDP's bucketed
    # reduction, so in that mode we skip the DDP wrap and sync grads manually.
    use_ddp = is_distributed and world_size > 1
    use_manual_grad_sync = False

    if use_ddp and not config.chunkwise_backward:
        if is_main:
            logger.info("=" * 70)
            logger.info("Cross-chunk graph with multi-GPU: using MANUAL gradient sync (NO DDP wrap)")
            logger.info("=" * 70)
        use_ddp = False
        use_manual_grad_sync = True

    if use_ddp:
        if config.use_fsdp:
            from functools import partial
            from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, MixedPrecision
            from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
            from transformers.models.qwen3.modeling_qwen3 import Qwen3DecoderLayer

            mp_policy = MixedPrecision(param_dtype=torch_dtype, reduce_dtype=torch_dtype, buffer_dtype=torch_dtype)
            auto_wrap = partial(transformer_auto_wrap_policy, transformer_layer_cls={Qwen3DecoderLayer})

            model = FSDP(
                model,
                auto_wrap_policy=auto_wrap,
                mixed_precision=mp_policy,
                device_id=torch.cuda.current_device(),
                use_orig_params=config.fsdp_use_orig_params,
            )
        else:
            model = DDP(
                model,
                device_ids=[local_rank],
                output_device=local_rank,
                find_unused_parameters=config.ddp_find_unused_parameters,
            )

    trainer = Trainer(
        model=model,
        train_dataloader=train_dataloader,
        eval_dataloader=eval_dataloader,
        config=config,
        rank=rank,
        world_size=world_size,
        is_distributed=is_distributed,
        tokenizer=tokenizer,
        use_manual_grad_sync=use_manual_grad_sync,
    )

    if args.eval_only:
        ckpt_path = args.ckpt_path or os.path.join(config.output_dir, config.final_ckpt_name)
        if is_main:
            logger.info(f"eval_only: loading checkpoint: {ckpt_path}")
        ckpt = torch.load(ckpt_path, map_location="cpu")

        sd = ckpt.get("memory_state_dict", {})
        if len(sd) > 0:
            unwrap_model(model).load_state_dict(sd, strict=False)

        eval_metrics = trainer.evaluate()
        if is_main:
            # Cap the exponent so perplexity stays finite even for huge losses.
            ppl = float(math.exp(min(20.0, eval_metrics["loss"])))
            logger.info(
                f"[EVAL] loss={eval_metrics['loss']:.4f}, ppl={ppl:.3f}, "
                f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
            )
        cleanup_distributed()
        return

    trainer.train()
    cleanup_distributed()


if __name__ == "__main__":
    main()
1361
+
examples/train_qwen_titans_babilong.py ADDED
@@ -0,0 +1,1664 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Qwen3 + Titans training on BABILong QA1 (32k).
3
+
4
+ Key ideas:
5
+ - Fixed-length 32k samples for DDP/FSDP stability.
6
+ - Stream long sequences by chunk (default 8k).
7
+ - Insert Titans memory modules into Qwen layers (stride configurable).
8
+ """
9
+
10
+ import os
11
+ import json
12
+ import math
13
+ import argparse
14
+ import logging
15
+ import weakref
16
+ from contextlib import nullcontext
17
+ from dataclasses import dataclass, asdict
18
+ from typing import Optional, Dict, Any, List, Tuple, Callable
19
+
20
+ import torch
21
+ import torch.nn as nn
22
+ import torch.nn.functional as F
23
+ import torch.distributed as dist
24
+ from torch.utils.data import Dataset, DataLoader
25
+ from torch.optim import AdamW
26
+ from torch.optim.lr_scheduler import CosineAnnealingLR
27
+ from torch.nn.parallel import DistributedDataParallel as DDP
28
+ from tqdm import tqdm
29
+
30
+ from einops import rearrange
31
+
32
+ # add repo root to sys.path
33
+ import sys
34
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
35
+
36
+ # Titans components
37
+ from titans_pytorch import NeuralMemory, MemoryMLP
38
+ from titans_pytorch.neural_memory import NeuralMemState
39
+
40
+ logging.basicConfig(
41
+ level=logging.INFO,
42
+ format="%(asctime)s - %(levelname)s - %(message)s"
43
+ )
44
+ logger = logging.getLogger(__name__)
45
+
46
+
47
@dataclass
class TrainingConfig:
    """Hyperparameters and paths for Qwen3 + Titans training on BABILong QA1.

    Fix vs. original: the ``chunkwise_backward`` field was declared twice;
    the redundant duplicate has been removed (same effective default).
    """

    # paths
    model_path: str = "/data/huangyifei/huggingface_cache/hub/models--Qwen--Qwen3-4B-Instruct-2507/snapshots/cdbee75f17c01a7cc42f958dc650907174af0554"
    data_path: str = "/data/yty/BABILong/babilong-train-5k-samples/data/qa1/32k.json"
    output_dir: str = "./outputs/qwen_titans_babilong"

    # training
    num_epochs: int = 10
    batch_size: int = 2
    gradient_accumulation_steps: int = 8
    max_grad_norm: float = 1.0

    # learning rates
    lr_memory: float = 1e-4
    lr_pretrained: float = 5e-6
    weight_decay: float = 0.01
    warmup_steps: int = 100

    # streaming / memory
    chunk_size: int = 8192
    use_memory: bool = True
    memory_chunk_size: int = 128
    memory_batch_size: int = 128
    memory_heads: int = 8
    memory_dim_head: int = 64
    memory_depth: int = 1
    memory_layer_stride: int = 8
    memory_fp32: bool = True
    detach_mem_state: bool = True
    freeze_base_model: bool = False  # freeze the Qwen backbone and train only the memory modules

    # evaluation / logging
    eval_steps: int = 200
    eval_topk: int = 0
    logging_steps: int = 10
    log_every_batches: int = 80
    final_eval_print_examples: int = 10
    debug_data_samples: int = 0
    debug_label_batches: int = 0
    debug_eval_stats: bool = False
    debug_grad_norm: bool = False

    # precision
    bf16: bool = True
    fp16: bool = False
    use_tf32: bool = True
    gradient_checkpointing: bool = False
    chunkwise_backward: bool = True

    # data
    max_length: int = 32768
    answer_reserve_tokens: int = 64
    label_prefix_tokens: int = 0
    max_samples: Optional[int] = 500  # 500 for quick experiments; raise for full training runs

    # fsdp
    use_fsdp: bool = False
    fsdp_use_orig_params: bool = True
    ddp_find_unused_parameters: bool = False

    # checkpoint
    save_full_checkpoint: bool = True
    final_ckpt_name: str = "final_memory_checkpoint.pt"
    final_full_ckpt_name: str = "final_full_checkpoint.pt"

    seed: int = 42
115
+
116
+
117
class BABILongDataset(Dataset):
    """BABILong QA dataset producing fixed-length training samples.

    Each sample is the context plus "Question: ...\nAnswer: <target>",
    tokenized and right-padded to ``max_length``.  Labels are -100
    everywhere except the answer tokens (plus an optional prefix of
    prompt tokens when ``label_prefix_tokens`` > 0).
    """

    def __init__(
        self,
        data_path: str,
        tokenizer,
        max_length: int = 32768,
        answer_reserve_tokens: int = 64,
        label_prefix_tokens: int = 0,
        max_samples: Optional[int] = None,
    ):
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.answer_reserve_tokens = answer_reserve_tokens
        self.label_prefix_tokens = int(label_prefix_tokens)

        logger.info(f"Loading dataset: {data_path}")
        with open(data_path, "r") as f:
            self.data = json.load(f)

        if max_samples:
            self.data = self.data[:max_samples]

        logger.info(f"Dataset size: {len(self.data)}")

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        record = self.data[idx]
        prompt_text = f"{record['input']}\n\nQuestion: {record['question']}\nAnswer:"

        pad_id = self.tokenizer.pad_token_id or 0
        reserve = int(self.answer_reserve_tokens)

        # Tokenize the prompt, leaving `reserve` slots free for the answer.
        prompt_ids = self.tokenizer(
            prompt_text,
            max_length=max(self.max_length - reserve, 1),
            truncation=True,
            add_special_tokens=True,
            return_tensors="pt",
        ).input_ids.squeeze(0)

        answer_ids = self.tokenizer(
            f" {record['target']}",
            add_special_tokens=False,
            return_tensors="pt",
        ).input_ids.squeeze(0)

        # Clip the answer to whatever room remains after the prompt.
        room = max(self.max_length - prompt_ids.numel(), 0)
        answer_ids = answer_ids[:room]

        input_ids = torch.cat([prompt_ids, answer_ids], dim=0)[: self.max_length]

        # Supervise only the answer span (optionally a few prompt tokens before it).
        labels = torch.full_like(input_ids, fill_value=-100)
        if answer_ids.numel() > 0:
            start = prompt_ids.numel()
            end = min(start + answer_ids.numel(), labels.numel())
            labels[start:end] = input_ids[start:end]
            if self.label_prefix_tokens > 0:
                prefix = min(start, self.label_prefix_tokens)
                if prefix > 0:
                    labels[start - prefix:start] = input_ids[start - prefix:start]

        # Right-pad everything to a fixed length (DDP/FSDP stability).
        seq_len = input_ids.numel()
        pad_len = self.max_length - seq_len
        if pad_len > 0:
            input_ids = F.pad(input_ids, (0, pad_len), value=int(pad_id))
            labels = F.pad(labels, (0, pad_len), value=-100)
            attention_mask = torch.cat(
                [torch.ones(seq_len, dtype=torch.long), torch.zeros(pad_len, dtype=torch.long)],
                dim=0,
            )
        else:
            attention_mask = torch.ones(self.max_length, dtype=torch.long)

        return {
            "input_ids": input_ids.to(dtype=torch.long),
            "labels": labels.to(dtype=torch.long),
            "attention_mask": attention_mask,
        }
198
+
199
+
200
def collate_fn(batch):
    """Stack a list of per-sample tensor dicts into one batched dict (dim 0 = batch)."""
    stacked = {}
    for key in batch[0].keys():
        stacked[key] = torch.stack([sample[key] for sample in batch], dim=0)
    return stacked
203
+
204
+
205
def _get_raw_dataset_item(dataset, idx: int) -> Optional[Dict[str, Any]]:
    """Resolve *idx* through an optional Subset wrapper and return the raw JSON record.

    Returns None when the underlying dataset is not a BABILongDataset or the
    index cannot be read.
    """
    underlying = dataset
    real_idx = idx
    if isinstance(dataset, torch.utils.data.Subset):
        underlying = dataset.dataset
        real_idx = dataset.indices[idx]
    if not (isinstance(underlying, BABILongDataset) and hasattr(underlying, "data")):
        return None
    try:
        return underlying.data[real_idx]
    except Exception:
        return None
217
+
218
+
219
def log_dataset_debug_stats(dataset, tokenizer, name: str, num_samples: int) -> None:
    """Log label/attention statistics for the first *num_samples* of *dataset*.

    Prints per-sample details for the first three samples (label span, decoded
    answer, target token count) and a summary over all inspected samples.
    No-op when *num_samples* <= 0.
    """
    if num_samples <= 0:
        return
    total = len(dataset)
    if total <= 0:
        logger.warning(f"[DATA DEBUG] {name}: empty dataset")
        return

    n = min(int(num_samples), total)
    zero_label = 0
    sum_label_tokens = 0
    sum_loss_tokens = 0
    sum_attn_tokens = 0

    for i in range(n):
        sample = dataset[i]
        labels = sample["labels"]
        attn = sample["attention_mask"]

        label_mask = labels != -100
        label_tokens = int(label_mask.sum().item())
        # Loss is computed on shifted labels, so the first position never counts.
        loss_tokens = int((labels[1:] != -100).sum().item()) if labels.numel() > 1 else 0
        attn_tokens = int(attn.sum().item())

        sum_label_tokens += label_tokens
        sum_loss_tokens += loss_tokens
        sum_attn_tokens += attn_tokens
        if label_tokens == 0:
            zero_label += 1

        # Detailed dump only for the first few samples.
        if i < min(3, n):
            label_pos = label_mask.nonzero(as_tuple=False).view(-1)
            first_label = int(label_pos[0].item()) if label_pos.numel() > 0 else -1
            last_label = int(label_pos[-1].item()) if label_pos.numel() > 0 else -1

            decoded = ""
            if tokenizer is not None and label_pos.numel() > 0:
                answer_ids = labels[label_pos].tolist()
                decoded = tokenizer.decode(answer_ids, skip_special_tokens=True).strip()
                if len(decoded) > 200:
                    decoded = decoded[:200] + "..."

            raw_item = _get_raw_dataset_item(dataset, i)
            target_chars = None
            target_tokens = None
            if raw_item is not None and tokenizer is not None:
                target_text = str(raw_item.get("target", ""))
                target_chars = len(target_text)
                target_ids = tokenizer(
                    f" {target_text}",
                    add_special_tokens=False,
                    return_tensors="pt",
                ).input_ids.squeeze(0)
                target_tokens = int(target_ids.numel())

            logger.info(
                f"[DATA DEBUG] {name} sample {i}: attn_tokens={attn_tokens}, "
                f"label_tokens={label_tokens}, loss_tokens={loss_tokens}, "
                f"label_span=[{first_label},{last_label}]"
            )
            if target_chars is not None or decoded:
                logger.info(
                    f"[DATA DEBUG] {name} target_chars={target_chars}, "
                    f"target_tokens={target_tokens}, decoded_answer={repr(decoded)}"
                )

    avg_label = sum_label_tokens / max(n, 1)
    avg_loss = sum_loss_tokens / max(n, 1)
    avg_attn = sum_attn_tokens / max(n, 1)
    logger.info(
        f"[DATA DEBUG] {name} summary: samples={n}, zero_label_samples={zero_label}, "
        f"avg_label_tokens={avg_label:.2f}, avg_loss_tokens={avg_loss:.2f}, avg_attn_tokens={avg_attn:.2f}"
    )
292
+
293
+
294
class QwenDecoderLayerWithTitansMemory(nn.Module):
    """Wrap a Qwen decoder layer with a Titans NeuralMemory side branch.

    After the wrapped layer runs, its hidden states are fed to a neural
    memory module; the retrieved memory is gated (sigmoid over the concat of
    hidden and retrieved) and added residually.  Memory state persists across
    forward calls until :meth:`reset_memory_state`.
    """

    def __init__(
        self,
        base_layer: nn.Module,
        *,
        hidden_size: int,
        chunk_size: int,
        batch_size: int,
        dim_head: int,
        num_heads: int,
        memory_depth: int,
        memory_fp32: bool,
        detach_mem_state: bool,
        parent_model: Optional[nn.Module] = None,
    ):
        super().__init__()
        self.layer = base_layer
        self.memory_fp32 = memory_fp32
        self.detach_mem_state = bool(detach_mem_state)
        self.memory_state: Optional[NeuralMemState] = None
        # weakref avoids a reference cycle between the layer and its parent model
        self.parent_model_ref = weakref.ref(parent_model) if parent_model is not None else None

        self.neural_memory = NeuralMemory(
            dim=hidden_size,
            chunk_size=chunk_size,
            batch_size=batch_size,
            dim_head=dim_head,
            heads=num_heads,
            model=MemoryMLP(dim=dim_head, depth=memory_depth, expansion_factor=2.0),
            momentum=True,
            momentum_order=1,
            qk_rmsnorm=True,
            pre_rmsnorm=True,
            default_step_transform_max_lr=1e-2,
            init_adaptive_step_bias=-6.0,
            max_grad_norm=1.0,
            spectral_norm_surprises=True,
            use_accelerated_scan=False,
        )

        # Gate that mixes retrieved memory back into the residual stream.
        self.mem_gate = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size),
            nn.Sigmoid(),
        )

        # Place the new modules on the same device as the wrapped layer;
        # the memory optionally stays in fp32 for numerical stability.
        try:
            first_param = next(base_layer.parameters())
            layer_device, layer_dtype = first_param.device, first_param.dtype
        except StopIteration:
            layer_device = None
            layer_dtype = None

        if layer_device is not None:
            mem_dtype = torch.float32 if memory_fp32 else layer_dtype
            self.neural_memory = self.neural_memory.to(device=layer_device, dtype=mem_dtype)
            if layer_dtype is not None:
                self.mem_gate = self.mem_gate.to(device=layer_device, dtype=layer_dtype)
            else:
                self.mem_gate = self.mem_gate.to(device=layer_device)

    def reset_memory_state(self):
        """Drop the persistent memory so the next forward starts fresh."""
        self.memory_state = None

    def _get_store_mask(self, hidden_states: torch.Tensor) -> Optional[torch.Tensor]:
        """Fetch the parent model's ``_mem_store_mask`` if it matches (batch, seq)."""
        parent = self.parent_model_ref() if self.parent_model_ref is not None else None
        if parent is None or not hasattr(parent, "_mem_store_mask"):
            return None
        mask = getattr(parent, "_mem_store_mask")
        if mask is None:
            return None
        mask = mask.to(device=hidden_states.device).bool()
        if mask.shape[:2] != hidden_states.shape[:2]:
            return None
        return mask

    def forward(self, *args, **kwargs):
        outputs = self.layer(*args, **kwargs)

        if isinstance(outputs, (tuple, list)):
            hidden_states, rest = outputs[0], outputs[1:]
        else:
            hidden_states, rest = outputs, None

        full_store_mask = self._get_store_mask(hidden_states)
        mem_inp = hidden_states.float() if self.memory_fp32 else hidden_states

        # Build the store sequence: drop a leading all-masked column, then trim
        # to a multiple of the memory's store chunk size.
        store_seq = None
        store_mask = full_store_mask
        if store_mask is not None:
            store_seq = mem_inp
            if store_mask.shape[1] > 0 and not store_mask[:, 0].any():
                store_seq = store_seq[:, 1:]
                store_mask = store_mask[:, 1:]

            store_chunk = self.neural_memory.store_chunk_size
            remainder = store_seq.shape[1] % store_chunk
            if remainder != 0:
                store_seq = store_seq[:, :-remainder]
                store_mask = store_mask[:, :-remainder]

        if store_mask is not None and store_seq is not None:
            if store_mask.shape[1] != store_seq.shape[1]:
                min_len = min(store_mask.shape[1], store_seq.shape[1])
                store_seq = store_seq[:, :min_len]
                store_mask = store_mask[:, :min_len]

            if store_seq.shape[1] == 0:
                store_seq = None
                store_mask = None

        # Run the memory outside autocast when it is kept in fp32.
        mem_ctx = (
            torch.amp.autocast(device_type=hidden_states.device.type, enabled=False)
            if self.memory_fp32
            else nullcontext()
        )
        with mem_ctx:
            retrieved, next_state = self.neural_memory(
                mem_inp,
                store_seq=store_seq,
                state=self.memory_state,
                store_mask=store_mask,
                detach_mem_state=self.detach_mem_state,
            )
        self.memory_state = next_state

        if retrieved is not None:
            retrieved = retrieved.to(dtype=hidden_states.dtype)
            # Zero out retrieval on positions the store mask excludes.
            if full_store_mask is not None and full_store_mask.shape[:2] == retrieved.shape[:2]:
                retrieved = retrieved * full_store_mask.unsqueeze(-1).to(dtype=retrieved.dtype)
            gate = self.mem_gate(torch.cat([hidden_states, retrieved], dim=-1))
            hidden_states = hidden_states + gate * retrieved

        if rest is None:
            return hidden_states
        return (hidden_states, *rest)
437
+
438
+
439
+ class QwenTitansForBABILong(nn.Module):
440
    def __init__(self, qwen_model, config: TrainingConfig):
        """Wrap a Qwen causal-LM with Titans memory layers at a fixed stride.

        Every `memory_layer_stride`-th decoder layer is replaced in place by a
        `QwenDecoderLayerWithTitansMemory` wrapper; when `config.use_memory`
        is falsy the base model is left untouched.
        """
        super().__init__()
        self.qwen = qwen_model
        self.config = config
        self.hidden_size = qwen_model.config.hidden_size
        self.use_memory = bool(getattr(config, "use_memory", True))

        if self.use_memory:
            self.memory_layer_stride = int(getattr(config, "memory_layer_stride", 4))
            self.memory_layer_indices = [
                idx for idx in range(len(self.qwen.model.layers)) if idx % self.memory_layer_stride == 0
            ]

            # Replace the selected decoder layers with memory-augmented wrappers.
            for layer_idx in self.memory_layer_indices:
                base_layer = self.qwen.model.layers[layer_idx]
                wrapped = QwenDecoderLayerWithTitansMemory(
                    base_layer,
                    hidden_size=self.hidden_size,
                    chunk_size=config.memory_chunk_size,
                    batch_size=config.memory_batch_size,
                    dim_head=config.memory_dim_head,
                    num_heads=config.memory_heads,
                    memory_depth=config.memory_depth,
                    memory_fp32=config.memory_fp32,
                    detach_mem_state=config.detach_mem_state,
                    parent_model=self.qwen.model,
                )
                self.qwen.model.layers[layer_idx] = wrapped
        else:
            self.memory_layer_stride = 0
            self.memory_layer_indices = []

        if self.use_memory:
            logger.info("[QwenTitansForBABILong] Initialized")
            logger.info(f" - hidden_size: {self.hidden_size}")
            logger.info(f" - chunk_size: {config.chunk_size}")
            logger.info(f" - memory_layer_stride: {self.memory_layer_stride}")
            logger.info(f" - memory_layers: {self.memory_layer_indices}")
        else:
            logger.info("[QwenTitansForBABILong] Initialized (memory disabled)")
            logger.info(f" - hidden_size: {self.hidden_size}")
            logger.info(f" - chunk_size: {config.chunk_size}")

        # Cache the wrapped layers for fast state reset / module collection.
        self._memory_layers = [
            layer for layer in self.qwen.model.layers if isinstance(layer, QwenDecoderLayerWithTitansMemory)
        ]
        self.qwen.model._mem_store_mask = None
487
+
488
+ def _split_into_chunks(self, tensor, chunk_size):
489
+ seq_len = tensor.shape[1]
490
+ chunks = []
491
+ for start in range(0, seq_len, chunk_size):
492
+ end = min(start + chunk_size, seq_len)
493
+ chunks.append((start, end, tensor[:, start:end]))
494
+ return chunks
495
+
496
+ def reset_memory_states(self):
497
+ for layer in self._memory_layers:
498
+ layer.reset_memory_state()
499
+
500
+ def _set_mem_store_mask(
501
+ self,
502
+ chunk_ids: torch.Tensor,
503
+ chunk_mask: Optional[torch.Tensor],
504
+ chunk_start: int,
505
+ ) -> None:
506
+ if not self.use_memory:
507
+ self.qwen.model._mem_store_mask = None
508
+ return
509
+ if chunk_mask is None:
510
+ if chunk_start > 0:
511
+ store_mask = torch.ones_like(chunk_ids, dtype=torch.bool)
512
+ store_mask[:, 0] = False
513
+ else:
514
+ store_mask = None
515
+ else:
516
+ store_mask = chunk_mask.to(device=chunk_ids.device).bool()
517
+ if chunk_start > 0:
518
+ store_mask[:, 0] = False
519
+ self.qwen.model._mem_store_mask = store_mask
520
+
521
+ def get_memory_modules(self) -> List[nn.Module]:
522
+ if not self._memory_layers:
523
+ return []
524
+ modules = []
525
+ for layer in self._memory_layers:
526
+ modules.append(layer.neural_memory)
527
+ modules.append(layer.mem_gate)
528
+ return modules
529
+
530
    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        return_pred_tokens: bool = False,
        topk: int = 0,
        chunkwise_backward: bool = False,
        loss_scale: Optional[float] = None,
        backward_fn: Optional[Callable[[torch.Tensor], None]] = None,
        chunk_start: Optional[int] = None,
        chunk_end: Optional[int] = None,
        reset_mem_state: bool = False,
    ) -> Dict[str, torch.Tensor]:
        """Chunked forward over a long sequence with persistent Titans memory.

        When `chunk_start`/`chunk_end` are given, delegates to
        `_forward_single_chunk` (the caller drives chunking and backward).
        Otherwise the sequence is split into `config.chunk_size` chunks, the
        memory state is reset, and a token-mean cross-entropy is accumulated
        over all chunks. Each chunk is processed with a one-token overlap
        (`proc_start = start - 1`) so the first label of the chunk has a
        predecessor hidden state after the causal shift.

        Returns a dict with "loss" and optionally "pred_ids"/"target_ids"/
        "target_lengths" (when `return_pred_tokens`) plus "topk_correct"/
        "topk_total" (when `topk > 0`).

        NOTE(review): `chunkwise_backward`, `loss_scale` and `backward_fn` are
        accepted but unused in this path — presumably the Trainer handles that
        mode via the chunk_start/chunk_end entry; confirm before removing.
        """
        if chunk_start is not None or chunk_end is not None:
            start = 0 if chunk_start is None else int(chunk_start)
            end = int(chunk_end) if chunk_end is not None else None
            return self._forward_single_chunk(
                input_ids=input_ids,
                attention_mask=attention_mask,
                labels=labels,
                chunk_start=start,
                chunk_end=end,
                reset_mem_state=reset_mem_state,
            )
        batch_size, seq_len = input_ids.shape
        chunk_size = self.config.chunk_size
        chunks = self._split_into_chunks(input_ids, chunk_size)

        self.reset_memory_states()
        loss_fct_sum = nn.CrossEntropyLoss(reduction="sum")
        total_loss_sum = None
        total_loss_tokens = 0
        topk_correct = None
        topk_total = None

        pred_tokens_by_sample: List[List[int]] = [[] for _ in range(batch_size)]
        target_tokens_by_sample: List[List[int]] = [[] for _ in range(batch_size)]
        if topk and topk > 0:
            device = input_ids.device
            topk_correct = torch.tensor(0.0, device=device, dtype=torch.float32)
            topk_total = torch.tensor(0.0, device=device, dtype=torch.float32)

        for start, end, _ in chunks:
            # One-token overlap with the previous chunk (see docstring).
            proc_start = max(0, start - 1)
            chunk_ids = input_ids[:, proc_start:end]
            chunk_labels = labels[:, proc_start:end] if labels is not None else None
            chunk_mask = attention_mask[:, proc_start:end] if attention_mask is not None else None

            self._set_mem_store_mask(chunk_ids, chunk_mask, start)
            hidden_full = self._process_chunk(chunk_ids, chunk_mask)
            if self.use_memory:
                self.qwen.model._mem_store_mask = None

            if chunk_labels is not None and (chunk_labels != -100).any():
                chunk_labels_local = chunk_labels.to(device=hidden_full.device)
                shift_hidden = hidden_full[:, :-1, :].contiguous()
                shift_labels = chunk_labels_local[:, 1:].contiguous()

                valid = shift_labels != -100
                if valid.any():
                    hs = shift_hidden[valid]
                    targets = shift_labels[valid]

                    # Guard against non-finite activations before the lm_head.
                    hs = torch.nan_to_num(hs.float(), nan=0.0, posinf=0.0, neginf=0.0)

                    logits = self.qwen.lm_head(hs)
                    logits = logits.float()
                    logits = torch.nan_to_num(logits, nan=0.0, posinf=0.0, neginf=0.0)
                    targets = targets.to(device=logits.device)

                    chunk_loss_sum = loss_fct_sum(logits, targets)
                    if total_loss_sum is None:
                        total_loss_sum = chunk_loss_sum
                    else:
                        total_loss_sum = total_loss_sum + chunk_loss_sum
                    total_loss_tokens += targets.numel()

                    if topk and topk > 0:
                        k = min(int(topk), logits.shape[-1])
                        topk_ids = torch.topk(logits, k=k, dim=-1).indices
                        correct = (topk_ids == targets.unsqueeze(-1)).any(dim=-1)
                        topk_correct = topk_correct + correct.float().sum()
                        topk_total = topk_total + torch.tensor(float(targets.numel()), device=topk_total.device)

                    if return_pred_tokens:
                        # Collect greedy predictions per batch element for EM scoring.
                        idx = valid.nonzero(as_tuple=False)
                        pred_flat = torch.argmax(logits, dim=-1).detach().to("cpu", dtype=torch.long).tolist()
                        tgt_flat = targets.detach().to("cpu", dtype=torch.long).tolist()
                        b_idx_flat = idx[:, 0].detach().to("cpu", dtype=torch.long).tolist()

                        for i, b_idx in enumerate(b_idx_flat):
                            pred_tokens_by_sample[b_idx].append(int(pred_flat[i]))
                            target_tokens_by_sample[b_idx].append(int(tgt_flat[i]))

        if total_loss_sum is None or total_loss_tokens == 0:
            device = next(self.qwen.parameters()).device
            loss = torch.zeros((), device=device, dtype=torch.float32)
        else:
            loss = total_loss_sum / total_loss_tokens

        out: Dict[str, torch.Tensor] = {"loss": loss}
        if return_pred_tokens:
            # Pad per-sample token lists into (batch, max_len) matrices (-1 = pad).
            lengths = torch.tensor([len(x) for x in target_tokens_by_sample], dtype=torch.long)
            max_len = int(lengths.max().item()) if lengths.numel() > 0 else 0
            if max_len > 0:
                pred_mat = torch.full((batch_size, max_len), -1, dtype=torch.long)
                tgt_mat = torch.full((batch_size, max_len), -1, dtype=torch.long)
                for b in range(batch_size):
                    L = int(lengths[b].item())
                    if L > 0:
                        pred_mat[b, :L] = torch.tensor(pred_tokens_by_sample[b], dtype=torch.long)
                        tgt_mat[b, :L] = torch.tensor(target_tokens_by_sample[b], dtype=torch.long)
            else:
                pred_mat = torch.empty((batch_size, 0), dtype=torch.long)
                tgt_mat = torch.empty((batch_size, 0), dtype=torch.long)
            out["pred_ids"] = pred_mat
            out["target_ids"] = tgt_mat
            out["target_lengths"] = lengths
        if topk and topk > 0 and topk_correct is not None and topk_total is not None:
            out["topk_correct"] = topk_correct
            out["topk_total"] = topk_total
        return out
653
+
654
    def _forward_single_chunk(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor],
        labels: Optional[torch.Tensor],
        chunk_start: int,
        chunk_end: Optional[int],
        reset_mem_state: bool,
    ) -> Dict[str, torch.Tensor]:
        """Process one chunk of the full sequence; return summed CE loss and token count.

        Used by the Trainer's chunkwise-backward path: the caller invokes this
        once per chunk (resetting memory on the first) and backpropagates each
        chunk's scaled loss itself.
        """
        if reset_mem_state:
            self.reset_memory_states()

        seq_len = input_ids.shape[1]
        end = chunk_end if chunk_end is not None else min(chunk_start + self.config.chunk_size, seq_len)
        end = min(int(end), seq_len)
        start = max(0, int(chunk_start))

        # One-token overlap with the previous chunk so the first label in this
        # chunk has a predecessor hidden state after the causal shift below.
        proc_start = max(0, start - 1)
        chunk_ids = input_ids[:, proc_start:end]
        chunk_labels = labels[:, proc_start:end] if labels is not None else None
        chunk_mask = attention_mask[:, proc_start:end] if attention_mask is not None else None

        self._set_mem_store_mask(chunk_ids, chunk_mask, start)
        hidden_full = self._process_chunk(chunk_ids, chunk_mask)
        if self.use_memory:
            self.qwen.model._mem_store_mask = None

        loss_fct_sum = nn.CrossEntropyLoss(reduction="sum")
        total_loss_sum = None
        total_loss_tokens = 0

        if chunk_labels is not None and (chunk_labels != -100).any():
            chunk_labels_local = chunk_labels.to(device=hidden_full.device)
            shift_hidden = hidden_full[:, :-1, :].contiguous()
            shift_labels = chunk_labels_local[:, 1:].contiguous()

            valid = shift_labels != -100
            if valid.any():
                hs = shift_hidden[valid]
                targets = shift_labels[valid]

                # Guard against non-finite activations before the lm_head.
                hs = torch.nan_to_num(hs.float(), nan=0.0, posinf=0.0, neginf=0.0)
                logits = self.qwen.lm_head(hs)
                logits = logits.float()
                logits = torch.nan_to_num(logits, nan=0.0, posinf=0.0, neginf=0.0)
                targets = targets.to(device=logits.device)

                total_loss_sum = loss_fct_sum(logits, targets)
                total_loss_tokens = targets.numel()

        if total_loss_sum is None:
            # Build a zero loss that still carries a grad graph (connected
            # through hidden_full). Critical for DDP: every rank must issue
            # the same sequence of backward calls so allreduce stays in sync.
            total_loss_sum = (hidden_full.float().sum() * 0.0)

        return {
            "loss_sum": total_loss_sum,
            "loss_tokens": total_loss_tokens,
            "has_grad": True,  # always has a grad graph now
        }
714
+
715
+ def _process_chunk(
716
+ self,
717
+ chunk_ids: torch.Tensor,
718
+ chunk_attention_mask: Optional[torch.Tensor] = None,
719
+ ) -> torch.Tensor:
720
+ if hasattr(self.qwen.model, "embed_tokens"):
721
+ token_embeds = self.qwen.model.embed_tokens(chunk_ids)
722
+ else:
723
+ token_embeds = self.qwen.get_input_embeddings()(chunk_ids)
724
+
725
+ outputs = self.qwen.model(
726
+ inputs_embeds=token_embeds,
727
+ attention_mask=chunk_attention_mask,
728
+ use_cache=False,
729
+ output_hidden_states=False,
730
+ return_dict=True,
731
+ )
732
+ return outputs.last_hidden_state
733
+
734
    def freeze_base_model(self):
        """
        Freeze most of the Qwen base model, keeping the memory modules and an
        independent lm_head trainable.

        Trainable parameters:
        - neural_memory: Q/K/V projections, adaptive lr, etc. (the memory MLP
          is updated through the surprise-driven forward pass)
        - mem_gate: mixes memory output into the original hidden states
        - lm_head: an independent output head (tied weights are untied here)

        Frozen parameters:
        - qwen.model.embed_tokens (keeps the input distribution fixed!)
        - qwen.model.layers (except neural_memory and mem_gate)
        - qwen.model.norm
        """
        frozen_count = 0
        trainable_count = 0
        lm_head_count = 0

        # Key step: untie the weights!
        # If lm_head shares storage with embed_tokens, create an independent lm_head.
        if hasattr(self.qwen, 'lm_head') and hasattr(self.qwen.model, 'embed_tokens'):
            lm_head_weight = self.qwen.lm_head.weight
            embed_weight = self.qwen.model.embed_tokens.weight
            has_tied_weights = lm_head_weight.data_ptr() == embed_weight.data_ptr()

            if has_tied_weights:
                logger.info("[freeze_base_model] Detected tied weights - untying lm_head from embed_tokens")
                # Create an independent lm_head (copying the current weights).
                new_lm_head = nn.Linear(
                    self.qwen.lm_head.in_features,
                    self.qwen.lm_head.out_features,
                    bias=self.qwen.lm_head.bias is not None,
                    device=lm_head_weight.device,
                    dtype=lm_head_weight.dtype,
                )
                # Copy the weights over.
                with torch.no_grad():
                    new_lm_head.weight.copy_(lm_head_weight)
                    if self.qwen.lm_head.bias is not None and new_lm_head.bias is not None:
                        new_lm_head.bias.copy_(self.qwen.lm_head.bias)
                # Swap in the independent head.
                self.qwen.lm_head = new_lm_head
                logger.info(f"[freeze_base_model] Created independent lm_head: {new_lm_head.weight.shape}")

        for name, param in self.named_parameters():
            # Decide whether this parameter should stay trainable.
            is_memory = "neural_memory" in name or "mem_gate" in name
            is_lm_head = "lm_head" in name

            if is_memory:
                param.requires_grad = True
                trainable_count += 1
            elif is_lm_head:
                # The independent lm_head stays trainable.
                param.requires_grad = True
                trainable_count += 1
                lm_head_count += 1
                logger.info(f"[freeze_base_model] lm_head param: {name}")
            else:
                # Freeze everything else, including embed_tokens!
                param.requires_grad = False
                frozen_count += 1

        logger.info(f"[freeze_base_model] Frozen: {frozen_count}, Trainable: {trainable_count} (lm_head: {lm_head_count})")
        return self
799
+
800
+ def get_param_groups(self, lr_memory: float, lr_pretrained: float, weight_decay: float):
801
+ memory_params = []
802
+ pretrained_params = []
803
+
804
+ for name, param in self.named_parameters():
805
+ if not param.requires_grad:
806
+ continue
807
+ if "neural_memory" in name or "mem_gate" in name:
808
+ memory_params.append(param)
809
+ else:
810
+ pretrained_params.append(param)
811
+
812
+ param_groups = []
813
+ if len(memory_params) > 0:
814
+ param_groups.append(
815
+ {"params": memory_params, "lr": lr_memory, "weight_decay": weight_decay, "name": "memory_module"}
816
+ )
817
+ if len(pretrained_params) > 0:
818
+ param_groups.append(
819
+ {"params": pretrained_params, "lr": lr_pretrained, "weight_decay": weight_decay, "name": "pretrained"}
820
+ )
821
+ logger.info(f"Param groups: memory={len(memory_params)}, pretrained={len(pretrained_params)}")
822
+ return param_groups
823
+
824
+
825
def init_distributed() -> tuple[bool, int, int, int]:
    """Initialize the NCCL process group from torchrun environment variables.

    Returns (is_distributed, rank, local_rank, world_size); single-process
    defaults when RANK/WORLD_SIZE are not set.

    Raises:
        RuntimeError: if torch.distributed is not available.
    """
    env = os.environ
    if "RANK" not in env or "WORLD_SIZE" not in env:
        return False, 0, 0, 1

    rank = int(env["RANK"])
    world_size = int(env["WORLD_SIZE"])
    local_rank = int(env.get("LOCAL_RANK", 0))

    if not dist.is_available():
        raise RuntimeError("torch.distributed not available")

    if not dist.is_initialized():
        dist.init_process_group(backend="nccl", init_method="env://")

    torch.cuda.set_device(local_rank)
    return True, rank, local_rank, world_size
841
+
842
+
843
def cleanup_distributed():
    """Synchronize all ranks then tear down the process group (no-op when not distributed)."""
    if not (dist.is_available() and dist.is_initialized()):
        return
    dist.barrier()
    dist.destroy_process_group()
847
+
848
+
849
def unwrap_model(model: nn.Module) -> nn.Module:
    """Strip DDP (`.module`) and FSDP wrappers, returning the underlying module."""
    if hasattr(model, "module"):
        return model.module
    fsdp_inner = getattr(model, "_fsdp_wrapped_module", None)
    if fsdp_inner is not None and hasattr(fsdp_inner, "module"):
        return fsdp_inner.module
    return model
857
+
858
+
859
def is_fsdp_model(model: nn.Module) -> bool:
    """Return True when `model` is a torch FSDP wrapper (False if FSDP is unavailable)."""
    try:
        from torch.distributed.fsdp import FullyShardedDataParallel
        result = isinstance(model, FullyShardedDataParallel)
    except Exception:
        result = False
    return result
865
+
866
+
867
+ class Trainer:
868
+ def __init__(
869
+ self,
870
+ model: QwenTitansForBABILong,
871
+ train_dataloader: DataLoader,
872
+ eval_dataloader: DataLoader,
873
+ config: TrainingConfig,
874
+ rank: int = 0,
875
+ world_size: int = 1,
876
+ is_distributed: bool = False,
877
+ tokenizer=None,
878
+ ):
879
+ self.model = model
880
+ self.train_dataloader = train_dataloader
881
+ self.eval_dataloader = eval_dataloader
882
+ self.config = config
883
+ self.device = next(model.parameters()).device
884
+ self.rank = rank
885
+ self.world_size = world_size
886
+ self.is_distributed = is_distributed
887
+ self.is_main_process = (rank == 0)
888
+ self.tokenizer = tokenizer
889
+
890
+ base_model = unwrap_model(self.model)
891
+ param_groups = base_model.get_param_groups(
892
+ lr_memory=config.lr_memory,
893
+ lr_pretrained=config.lr_pretrained,
894
+ weight_decay=config.weight_decay,
895
+ )
896
+ self.optimizer = AdamW(param_groups)
897
+
898
+ total_steps = math.ceil(
899
+ (len(train_dataloader) * config.num_epochs) / max(config.gradient_accumulation_steps, 1)
900
+ )
901
+ self.scheduler = CosineAnnealingLR(self.optimizer, T_max=total_steps, eta_min=1e-7)
902
+
903
+ self.scaler = torch.cuda.amp.GradScaler(enabled=config.fp16)
904
+ self.global_step = 0
905
+
906
+ def _get_group_lr(self, group_name: str) -> Optional[float]:
907
+ for group in self.optimizer.param_groups:
908
+ if group.get("name") == group_name:
909
+ return group.get("lr")
910
+ return None
911
+
912
    def train(self):
        """Main training loop: AMP, gradient accumulation, optional chunkwise backward.

        Two backward modes:
        - chunkwise (`config.chunkwise_backward`): the model is called once per
          chunk via chunk_start/chunk_end and each chunk's summed loss (scaled
          to a token-mean) is backpropagated immediately, freeing activations.
        - standard: one full-sequence forward, single backward.
        Under DDP, `no_sync()` suppresses gradient allreduce until the last
        backward of each accumulation window. Periodic and per-epoch
        evaluation is run; final eval + checkpoint at the end.
        """
        self.model.train()
        if self.is_main_process:
            logger.info("Start training")

        last_epoch_loss = None
        for epoch in range(self.config.num_epochs):
            sampler = getattr(self.train_dataloader, "sampler", None)
            if sampler is not None and hasattr(sampler, "set_epoch"):
                sampler.set_epoch(epoch)
            if self.is_main_process:
                logger.info(f"Epoch {epoch + 1}/{self.config.num_epochs}")

            epoch_loss = 0.0
            num_batches = 0

            pbar = self.train_dataloader
            if self.is_main_process:
                pbar = tqdm(
                    self.train_dataloader,
                    desc=f"Epoch {epoch + 1}/{self.config.num_epochs}",
                    leave=False,
                    dynamic_ncols=True,
                )
            for step, batch in enumerate(pbar):
                batch = {k: v.to(self.device) for k, v in batch.items()}
                # Optional per-batch label accounting for the first few batches.
                if (
                    self.config.debug_label_batches > 0
                    and self.is_main_process
                    and step < int(self.config.debug_label_batches)
                ):
                    labels = batch.get("labels")
                    if labels is not None:
                        label_tokens = int((labels != -100).sum().item())
                        loss_tokens = int((labels[:, 1:] != -100).sum().item()) if labels.size(1) > 1 else 0
                        attn_tokens = int(batch["attention_mask"].sum().item())
                        logger.info(
                            f"[BATCH DEBUG] epoch={epoch + 1} step={step + 1}: "
                            f"attn_tokens={attn_tokens}, label_tokens={label_tokens}, loss_tokens={loss_tokens}"
                        )
                    else:
                        logger.info(f"[BATCH DEBUG] epoch={epoch + 1} step={step + 1}: labels missing")

                ga = max(self.config.gradient_accumulation_steps, 1)
                sync_gradients = ((step + 1) % ga == 0)
                amp_enabled = self.config.fp16 or self.config.bf16
                amp_dtype = torch.float16 if self.config.fp16 else torch.bfloat16
                with torch.amp.autocast(device_type=self.device.type, enabled=amp_enabled, dtype=amp_dtype):
                    if self.config.chunkwise_backward:
                        labels = batch.get("labels")
                        if labels is not None:
                            total_tokens = int((labels[:, 1:] != -100).sum().item())
                        else:
                            total_tokens = 0
                        # Scale so summed per-chunk losses yield a token-mean / ga.
                        loss_scale = 0.0 if total_tokens == 0 else (1.0 / total_tokens / ga)

                        seq_len = batch["input_ids"].shape[1]
                        chunk_size = int(self.config.chunk_size)
                        chunk_ranges = [
                            (start, min(start + chunk_size, seq_len))
                            for start in range(0, seq_len, chunk_size)
                        ]
                        raw_loss_sum = None

                        for idx, (start, end) in enumerate(chunk_ranges):
                            is_last_chunk = (idx == len(chunk_ranges) - 1)
                            # Only allreduce on the last chunk of a sync step.
                            sync_chunk = sync_gradients and is_last_chunk
                            chunk_ctx = (
                                self.model.no_sync
                                if (self.is_distributed and not sync_chunk)
                                else nullcontext
                            )
                            with chunk_ctx():
                                outputs = self.model(
                                    input_ids=batch["input_ids"],
                                    attention_mask=batch["attention_mask"],
                                    labels=labels,
                                    chunk_start=start,
                                    chunk_end=end,
                                    reset_mem_state=(idx == 0),
                                )
                                chunk_loss_sum = outputs["loss_sum"]
                                chunk_loss_tokens = int(outputs.get("loss_tokens", 0))
                                if raw_loss_sum is None:
                                    raw_loss_sum = chunk_loss_sum.detach()
                                else:
                                    raw_loss_sum = raw_loss_sum + chunk_loss_sum.detach()

                                # DDP: every rank must run the same sequence of
                                # backward calls — even when loss_scale is 0 or
                                # the chunk has no valid tokens — so allreduce
                                # stays in sync.
                                scaled_loss = chunk_loss_sum * float(loss_scale)
                                if self.config.fp16:
                                    self.scaler.scale(scaled_loss).backward()
                                else:
                                    scaled_loss.backward()

                        if raw_loss_sum is None or total_tokens == 0:
                            raw_loss = torch.zeros((), device=self.device, dtype=torch.float32)
                        else:
                            raw_loss = raw_loss_sum / total_tokens
                        loss = raw_loss / ga
                    else:
                        ctx = self.model.no_sync if (self.is_distributed and not sync_gradients) else nullcontext
                        with ctx():
                            outputs = self.model(
                                input_ids=batch["input_ids"],
                                attention_mask=batch["attention_mask"],
                                labels=batch["labels"],
                            )
                            raw_loss = outputs["loss"]
                            loss = raw_loss / ga

                            if self.config.fp16:
                                self.scaler.scale(loss).backward()
                            else:
                                loss.backward()

                epoch_loss += raw_loss.detach().float().item()
                num_batches += 1

                if sync_gradients:
                    grad_norm = None
                    if self.config.fp16:
                        self.scaler.unscale_(self.optimizer)
                        grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.max_grad_norm)
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                    else:
                        grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.max_grad_norm)
                        self.optimizer.step()

                    self.scheduler.step()
                    self.optimizer.zero_grad(set_to_none=True)
                    self.global_step += 1

                    if self.is_main_process:
                        avg_loss = epoch_loss / max(num_batches, 1)
                        pbar.set_postfix(
                            {
                                "gstep": self.global_step,
                                "loss": f"{avg_loss:.4f}",
                            }
                        )

                    if self.global_step % self.config.logging_steps == 0 and self.is_main_process:
                        lr_mem = self._get_group_lr("memory_module")
                        lr_pre = self._get_group_lr("pretrained")
                        if lr_pre is None and self.optimizer.param_groups:
                            lr_pre = self.optimizer.param_groups[0]["lr"]
                        grad_note = ""
                        if self.config.debug_grad_norm and grad_norm is not None:
                            grad_note = f" | grad_norm={float(grad_norm):.4f}"
                        if lr_mem is None:
                            lr_label = f"lr={lr_pre:.2e}" if lr_pre is not None else "lr=NA"
                            logger.info(
                                f"Step {self.global_step} | loss={epoch_loss / max(num_batches, 1):.4f} | "
                                f"{lr_label}{grad_note}"
                            )
                        else:
                            logger.info(
                                f"Step {self.global_step} | loss={epoch_loss / max(num_batches, 1):.4f} | "
                                f"lr_mem={lr_mem:.2e} | lr_pre={lr_pre:.2e}{grad_note}"
                            )

                    if self.global_step % self.config.eval_steps == 0:
                        eval_metrics = self.evaluate()
                        if self.is_main_process:
                            logger.info(
                                f"Step {self.global_step}: "
                                f"eval_loss={eval_metrics['loss']:.4f}, "
                                f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                                f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
                            )
                        self.model.train()

            avg_epoch_loss = epoch_loss / max(num_batches, 1)
            if self.is_distributed:
                t = torch.tensor(avg_epoch_loss, device=self.device, dtype=torch.float32)
                dist.all_reduce(t, op=dist.ReduceOp.SUM)
                avg_epoch_loss = (t / self.world_size).item()

            if self.is_main_process:
                logger.info(f"Epoch {epoch + 1} done, avg loss={avg_epoch_loss:.4f}")
            last_epoch_loss = avg_epoch_loss

            eval_metrics = self.evaluate()
            if self.is_main_process:
                logger.info(
                    f"[EPOCH {epoch + 1} EVAL] "
                    f"eval_loss={eval_metrics['loss']:.4f}, "
                    f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                    f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
                )
                self._append_eval_metrics(
                    eval_metrics,
                    phase="epoch",
                    epoch=int(epoch + 1),
                    train_avg_loss=avg_epoch_loss,
                )
            self.model.train()

        if self.is_main_process:
            logger.info("Training done, final evaluation")

        final_eval = self.evaluate(print_examples=int(self.config.final_eval_print_examples))
        if self.is_main_process:
            ppl = float(math.exp(min(20.0, final_eval["loss"])))
            logger.info(
                f"[FINAL EVAL] loss={final_eval['loss']:.4f}, ppl≈{ppl:.3f}, "
                f"em_acc={final_eval['em_acc'] * 100:.2f}%, "
                f"tok_acc={final_eval['tok_acc'] * 100:.2f}%"
            )
            logger.info("Saving final checkpoint")
            self._append_eval_metrics(
                final_eval,
                phase="final",
                epoch=int(self.config.num_epochs),
                train_avg_loss=last_epoch_loss,
            )
        self.save_final_checkpoint()
1133
+
1134
    @torch.no_grad()
    def evaluate(self, print_examples: int = 0) -> Dict[str, float]:
        """Evaluate loss plus answer-only token / EM / top-k accuracy.

        Runs the model with `return_pred_tokens=True`, compares greedy
        predictions to answer tokens per sample, and (when a tokenizer is
        available) computes exact-match on the decoded strings. All counters
        are reduced across ranks under DDP. Returns a dict with keys
        "loss", "tok_acc", "em_acc", "topk_acc".
        """
        self.model.eval()
        total_loss = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_batches = torch.tensor(0.0, device=self.device, dtype=torch.float32)

        total_tok_correct = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_tok_total = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_em_correct = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_em_total = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_topk_correct = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_topk_total = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        printed = 0

        for batch in self.eval_dataloader:
            batch = {k: v.to(self.device) for k, v in batch.items()}
            amp_enabled = self.config.fp16 or self.config.bf16
            amp_dtype = torch.float16 if self.config.fp16 else torch.bfloat16
            with torch.amp.autocast(device_type=self.device.type, enabled=amp_enabled, dtype=amp_dtype):
                outputs = self.model(
                    input_ids=batch["input_ids"],
                    attention_mask=batch["attention_mask"],
                    labels=batch["labels"],
                    return_pred_tokens=True,
                    topk=int(self.config.eval_topk) if self.config.eval_topk else 0,
                )

            # Skip non-finite losses so one bad batch cannot poison the mean.
            if torch.isfinite(outputs["loss"]):
                total_loss += outputs["loss"].detach().float()
                total_batches += 1.0

            pred_ids = outputs.get("pred_ids", None)
            target_ids = outputs.get("target_ids", None)
            lengths = outputs.get("target_lengths", None)
            topk_correct = outputs.get("topk_correct", None)
            topk_total = outputs.get("topk_total", None)
            if topk_correct is not None and topk_total is not None:
                total_topk_correct += topk_correct.detach().float()
                total_topk_total += topk_total.detach().float()
            # Validate the padded prediction matrices before scoring.
            if (
                pred_ids is not None
                and target_ids is not None
                and lengths is not None
                and pred_ids.ndim == 2
                and target_ids.ndim == 2
                and lengths.ndim == 1
                and pred_ids.shape == target_ids.shape
                and pred_ids.shape[0] == lengths.shape[0]
            ):
                pred_cpu = pred_ids.to("cpu", dtype=torch.long)
                tgt_cpu = target_ids.to("cpu", dtype=torch.long)
                len_cpu = lengths.to("cpu", dtype=torch.long)

                for i in range(int(len_cpu.shape[0])):
                    L = int(len_cpu[i].item())
                    if L <= 0:
                        continue
                    p = pred_cpu[i, :L]
                    t = tgt_cpu[i, :L]

                    total_tok_correct += torch.tensor(float((p == t).sum().item()), device=self.device, dtype=torch.float32)
                    total_tok_total += torch.tensor(float(L), device=self.device, dtype=torch.float32)

                    if self.tokenizer is not None:
                        pred_text = self.tokenizer.decode(p.tolist(), skip_special_tokens=True).strip()
                        tgt_text = self.tokenizer.decode(t.tolist(), skip_special_tokens=True).strip()
                        em = float(pred_text == tgt_text)
                        total_em_correct += torch.tensor(em, device=self.device, dtype=torch.float32)
                        total_em_total += torch.tensor(1.0, device=self.device, dtype=torch.float32)

                        if self.is_main_process and printed < print_examples:
                            logger.info(f"[EVAL SAMPLE] pred={repr(pred_text)} | label={repr(tgt_text)} | match={bool(em)}")
                            printed += 1

        if self.is_distributed:
            dist.all_reduce(total_loss, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_batches, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_tok_correct, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_tok_total, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_em_correct, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_em_total, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_topk_correct, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_topk_total, op=dist.ReduceOp.SUM)

        avg_loss = (total_loss / total_batches.clamp(min=1.0)).item()
        tok_acc = (total_tok_correct / total_tok_total.clamp(min=1.0)).item()
        em_acc = (total_em_correct / total_em_total.clamp(min=1.0)).item()
        topk_acc = (total_topk_correct / total_topk_total.clamp(min=1.0)).item()
        if self.is_main_process:
            if self.config.debug_eval_stats:
                logger.info(
                    "[EVAL DEBUG] total_batches="
                    f"{float(total_batches.item()):.0f}, total_tok_total={float(total_tok_total.item()):.0f}, "
                    f"total_em_total={float(total_em_total.item()):.0f}, "
                    f"total_topk_total={float(total_topk_total.item()):.0f}"
                )
                if total_tok_total.item() == 0:
                    logger.warning("[EVAL DEBUG] No answer tokens found in eval set; acc will be 0.")
            logger.info(f"[EVAL METRIC] token_acc(answer-only) = {tok_acc * 100:.2f}%")
            logger.info(f"[EVAL METRIC] EM/acc(answer-only) = {em_acc * 100:.2f}%")
            if self.config.eval_topk and self.config.eval_topk > 0:
                logger.info(f"[EVAL METRIC] top{int(self.config.eval_topk)}_acc(answer-only) = {topk_acc * 100:.2f}%")
        return {"loss": avg_loss, "tok_acc": tok_acc, "em_acc": em_acc, "topk_acc": topk_acc}
1237
+
1238
+ def _append_eval_metrics(
1239
+ self,
1240
+ metrics: Dict[str, float],
1241
+ *,
1242
+ phase: str,
1243
+ epoch: Optional[int],
1244
+ train_avg_loss: Optional[float],
1245
+ ) -> None:
1246
+ if not self.is_main_process:
1247
+ return
1248
+ os.makedirs(self.config.output_dir, exist_ok=True)
1249
+ record = {
1250
+ "phase": phase,
1251
+ "epoch": epoch,
1252
+ "global_step": int(self.global_step),
1253
+ "train_avg_loss": None if train_avg_loss is None else float(train_avg_loss),
1254
+ "eval_loss": float(metrics.get("loss", 0.0)),
1255
+ "em_acc_pct": float(metrics.get("em_acc", 0.0) * 100.0),
1256
+ "tok_acc_pct": float(metrics.get("tok_acc", 0.0) * 100.0),
1257
+ }
1258
+ metrics_path = os.path.join(self.config.output_dir, "eval_metrics.jsonl")
1259
+ with open(metrics_path, "a") as f:
1260
+ f.write(json.dumps(record) + "\n")
1261
+
1262
def save_final_checkpoint(self):
    """Save the trained Titans memory weights, and optionally the full model.

    Writes up to two files under ``config.output_dir``:
      1. ``final_ckpt_name``: only parameters whose names contain
         "neural_memory" or "mem_gate" (the memory-module additions).
      2. ``final_full_ckpt_name``: the complete model state dict, when
         ``config.save_full_checkpoint`` is set.
    Only the main process writes; distributed ranks synchronize on barriers.
    """
    ckpt_path = os.path.join(self.config.output_dir, self.config.final_ckpt_name)
    base_model = unwrap_model(self.model)
    # Collect only the Titans memory parameters by name substring.
    memory_sd = {
        name: p.detach().cpu()
        for name, p in base_model.named_parameters()
        if ("neural_memory" in name) or ("mem_gate" in name)
    }

    # Under FSDP the unwrapped module's named_parameters() may yield nothing
    # usable (sharded/flattened params), so fall back to gathering a full
    # state dict on rank 0 and filtering it by name.
    if is_fsdp_model(self.model) and len(memory_sd) == 0:
        from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
        full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
        with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, full_cfg):
            full_sd = self.model.state_dict()
        memory_sd = {k: v for k, v in full_sd.items() if ("neural_memory" in k) or ("mem_gate" in k)}

    if self.is_main_process:
        torch.save(
            {"memory_state_dict": memory_sd, "global_step": self.global_step, "config": asdict(self.config)},
            ckpt_path,
        )
        logger.info(f"Saved memory checkpoint: {ckpt_path}")
    if self.is_distributed:
        # Keep non-main ranks from racing ahead of the file write.
        dist.barrier()

    if self.config.save_full_checkpoint:
        full_ckpt_path = os.path.join(self.config.output_dir, self.config.final_full_ckpt_name)
        if is_fsdp_model(self.model):
            # Gather a consolidated (unsharded) state dict, CPU-offloaded,
            # materialized only on rank 0.
            from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
            full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
            with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, full_cfg):
                full_sd = self.model.state_dict()
        else:
            full_sd = unwrap_model(self.model).state_dict()

        if self.is_main_process:
            torch.save(
                {"model_state_dict": full_sd, "global_step": self.global_step, "config": asdict(self.config)},
                full_ckpt_path,
            )
            logger.info(f"Saved full checkpoint: {full_ckpt_path}")
        if self.is_distributed:
            dist.barrier()
1305
+
1306
+
1307
def main():
    """Entry point: parse CLI args, build model/data, then train or evaluate.

    Supports single-GPU, DDP, and FSDP execution. CLI flags override fields
    of a freshly constructed ``TrainingConfig``.
    """
    from transformers import AutoModelForCausalLM, AutoTokenizer

    parser = argparse.ArgumentParser()
    parser.add_argument("--fsdp", action="store_true")
    parser.add_argument("--eval_only", action="store_true")
    parser.add_argument("--ckpt_path", type=str, default=None)
    # NOTE(review): --max_eval_samples is parsed but never applied below.
    parser.add_argument("--max_eval_samples", type=int, default=None)
    parser.add_argument("--max_samples", type=int, default=None)
    parser.add_argument("--max_length", type=int, default=None)
    parser.add_argument("--output_dir", type=str, default=None)
    parser.add_argument("--num_epochs", type=int, default=None)
    parser.add_argument("--eval_steps", type=int, default=None)
    parser.add_argument("--eval_topk", type=int, default=0)
    parser.add_argument("--batch_size", type=int, default=None)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=None)
    parser.add_argument("--chunk_size", type=int, default=None)
    parser.add_argument("--memory_layer_stride", type=int, default=None)
    parser.add_argument("--no_memory", action="store_true")
    parser.add_argument("--gradient_checkpointing", action="store_true")
    parser.add_argument("--no_chunkwise_backward", action="store_true")
    parser.add_argument("--log_every_batches", type=int, default=80)
    parser.add_argument("--label_prefix_tokens", type=int, default=0)
    parser.add_argument(
        "--no_detach_mem_state",
        action="store_true",
        help="(ignored) kept for backward compatibility; detach_mem_state is forced True",
    )
    parser.add_argument("--debug_data_samples", type=int, default=0)
    parser.add_argument("--debug_label_batches", type=int, default=0)
    parser.add_argument("--debug_eval_stats", action="store_true")
    parser.add_argument("--debug_grad_norm", action="store_true")
    parser.add_argument(
        "--freeze_base_model",
        action="store_true",
        help="冻结 Qwen base 模型,只训练记忆模块 (neural_memory + mem_gate)",
    )
    args = parser.parse_args()

    # ---- Apply CLI overrides onto the default config ----
    config = TrainingConfig()
    if args.fsdp:
        config.use_fsdp = True
    if args.no_memory:
        config.use_memory = False
    if args.max_samples is not None:
        config.max_samples = args.max_samples
    if args.max_length is not None:
        config.max_length = int(args.max_length)
    if args.output_dir is not None:
        config.output_dir = args.output_dir
    elif not config.use_memory:
        # Keep no-memory ablation runs in a separate output directory.
        config.output_dir = "./outputs/qwen_babilong_no_memory"
    if args.num_epochs is not None:
        config.num_epochs = args.num_epochs
    if args.eval_steps is not None:
        config.eval_steps = args.eval_steps
    if args.eval_topk is not None:
        config.eval_topk = int(args.eval_topk)
    if args.batch_size is not None:
        config.batch_size = int(args.batch_size)
    if args.gradient_accumulation_steps is not None:
        config.gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    if args.chunk_size is not None:
        config.chunk_size = int(args.chunk_size)
    if args.memory_layer_stride is not None:
        config.memory_layer_stride = int(args.memory_layer_stride)
    if args.gradient_checkpointing:
        config.gradient_checkpointing = True
    if args.no_chunkwise_backward:
        config.chunkwise_backward = False
    if args.label_prefix_tokens is not None:
        config.label_prefix_tokens = int(args.label_prefix_tokens)
    ignored_no_detach = bool(args.no_detach_mem_state)
    if args.log_every_batches is not None:
        config.log_every_batches = int(args.log_every_batches)
    # Convert "log every N batches" into optimizer-step units (after grad accumulation).
    ga = max(int(config.gradient_accumulation_steps), 1)
    config.logging_steps = max(1, math.ceil(config.log_every_batches / ga))
    if args.debug_data_samples is not None:
        config.debug_data_samples = int(args.debug_data_samples)
    if args.debug_label_batches is not None:
        config.debug_label_batches = int(args.debug_label_batches)
    if args.debug_eval_stats:
        config.debug_eval_stats = True
    if args.debug_grad_norm:
        config.debug_grad_norm = True

    # ---- Distributed setup and feature-compatibility checks ----
    is_distributed, rank, local_rank, world_size = init_distributed()
    is_main = (rank == 0)
    if ignored_no_detach and is_main:
        logger.warning("Ignoring --no_detach_mem_state; plan A keeps detach_mem_state=True.")

    if config.use_fsdp and config.chunkwise_backward:
        if is_main:
            logger.warning("chunkwise_backward is incompatible with FSDP; disabling it.")
        config.chunkwise_backward = False

    if is_distributed and (not config.use_fsdp) and config.gradient_checkpointing:
        config.gradient_checkpointing = False
        if is_main:
            logger.warning("gradient_checkpointing is unstable with DDP here; disabling it.")

    if is_distributed and (not config.use_fsdp) and config.chunkwise_backward:
        if is_main:
            logger.info("DDP chunkwise backward enabled via per-chunk forward/backward.")

    if is_distributed and (not config.use_fsdp):
        if not config.ddp_find_unused_parameters:
            config.ddp_find_unused_parameters = True
            if is_main:
                logger.warning("Enabling DDP find_unused_parameters to avoid unused grad errors.")

    # Per-rank seed offset so data-order randomness differs across ranks.
    torch.manual_seed(config.seed + rank)

    if torch.cuda.is_available():
        device = torch.device(f"cuda:{local_rank}" if is_distributed else "cuda")
    else:
        device = torch.device("cpu")

    # ---- Precision selection: prefer bf16, fall back to fp16 ----
    if torch.cuda.is_available() and config.bf16:
        bf16_supported = False
        try:
            bf16_supported = torch.cuda.is_bf16_supported()
        except Exception:
            bf16_supported = False
        if not bf16_supported:
            if is_main:
                logger.warning("bf16 not supported on this GPU/runtime; falling back to fp16.")
            config.bf16 = False
            if not config.fp16:
                config.fp16 = True

    if torch.cuda.is_available() and getattr(config, "use_tf32", False):
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True
        try:
            torch.set_float32_matmul_precision("high")
        except Exception:
            pass

    # ---- Run summary (rank 0 only) ----
    if is_main:
        logger.info("=" * 60)
        logger.info("Qwen3-4B + Titans training (DDP/FSDP)")
        logger.info("=" * 60)
        logger.info(f"distributed={is_distributed}, world_size={world_size}, use_fsdp={config.use_fsdp}")
        logger.info(f"mode={'EVAL_ONLY' if args.eval_only else 'TRAIN'}")
        logger.info(f"model_path={config.model_path}")
        logger.info(f"data_path={config.data_path}")
        logger.info(f"output_dir={config.output_dir}")
        logger.info(f"max_samples={config.max_samples}")
        logger.info(f"max_length={config.max_length}")
        logger.info(f"chunk_size={config.chunk_size}")
        logger.info(f"use_memory={config.use_memory}")
        if config.use_memory:
            logger.info(f"memory_layer_stride={config.memory_layer_stride}")
        logger.info(f"chunkwise_backward={config.chunkwise_backward}")
        logger.info(f"label_prefix_tokens={config.label_prefix_tokens}")
        logger.info(f"detach_mem_state={config.detach_mem_state}")
        logger.info(f"freeze_base_model={config.freeze_base_model}")
        if config.eval_topk:
            logger.info(f"eval_topk={config.eval_topk}")

    tokenizer = AutoTokenizer.from_pretrained(config.model_path, trust_remote_code=True)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # Disable flash-attn / torchao / torchvision detection so transformers
    # does not pick up optional backends that are unavailable/unwanted here.
    try:
        import transformers
        from transformers.utils import import_utils as _import_utils

        def _disabled(*args, **kwargs):
            return False

        _import_utils.is_flash_attn_2_available = _disabled
        if hasattr(transformers, "utils") and hasattr(transformers.utils, "is_flash_attn_2_available"):
            transformers.utils.is_flash_attn_2_available = _disabled

        _import_utils.is_torchao_available = _disabled
        if hasattr(transformers, "utils") and hasattr(transformers.utils, "is_torchao_available"):
            transformers.utils.is_torchao_available = _disabled

        _import_utils.is_torchvision_available = _disabled
        if hasattr(transformers, "utils") and hasattr(transformers.utils, "is_torchvision_available"):
            transformers.utils.is_torchvision_available = _disabled
    except Exception as e:
        logger.warning(f"Disable checks failed (ignored): {e}")

    torch_dtype = torch.bfloat16 if config.bf16 else (torch.float16 if config.fp16 else torch.float32)

    # ---- Base model ----
    qwen_model = AutoModelForCausalLM.from_pretrained(
        config.model_path,
        torch_dtype=torch_dtype,
        device_map=None,
        trust_remote_code=True,
        attn_implementation="sdpa",
        low_cpu_mem_usage=True,
    )
    qwen_model.to(device)
    # KV cache is useless during training and wastes memory.
    qwen_model.config.use_cache = False
    if config.gradient_checkpointing and hasattr(qwen_model, "gradient_checkpointing_enable"):
        qwen_model.gradient_checkpointing_enable()

    # ---- Data: 90/10 train/eval split from one JSON file ----
    train_dataset = BABILongDataset(
        config.data_path,
        tokenizer,
        max_length=config.max_length,
        answer_reserve_tokens=config.answer_reserve_tokens,
        label_prefix_tokens=config.label_prefix_tokens,
        max_samples=config.max_samples,
    )

    train_size = int(0.9 * len(train_dataset))
    eval_size = len(train_dataset) - train_size
    train_dataset, eval_dataset = torch.utils.data.random_split(
        train_dataset,
        [train_size, eval_size],
        # Fixed generator so every rank gets the identical split.
        generator=torch.Generator().manual_seed(config.seed),
    )

    if is_main and config.debug_data_samples > 0:
        log_dataset_debug_stats(train_dataset, tokenizer, "train", config.debug_data_samples)
        log_dataset_debug_stats(eval_dataset, tokenizer, "eval", config.debug_data_samples)

    train_sampler = None
    eval_sampler = None
    if is_distributed:
        from torch.utils.data.distributed import DistributedSampler
        train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank, shuffle=True, seed=config.seed)
        eval_sampler = DistributedSampler(eval_dataset, num_replicas=world_size, rank=rank, shuffle=False)

    train_dataloader = DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=(train_sampler is None),
        sampler=train_sampler,
        collate_fn=collate_fn,
        num_workers=0,
    )
    eval_dataloader = DataLoader(
        eval_dataset,
        batch_size=config.batch_size,
        shuffle=False,
        sampler=eval_sampler,
        collate_fn=collate_fn,
        num_workers=0,
    )

    # ---- Wrap base model with the Titans memory integration ----
    model = QwenTitansForBABILong(qwen_model, config)
    model.to(device)

    # Apply --freeze_base_model onto the config.
    if args.freeze_base_model:
        config.freeze_base_model = True

    if config.freeze_base_model:
        # Freezing the base only makes sense if there is a memory module to train.
        if not config.use_memory:
            if is_main:
                logger.error("--freeze_base_model requires memory module (--no_memory is incompatible)")
            raise ValueError("freeze_base_model requires use_memory=True")
        model.freeze_base_model()
        if is_main:
            logger.info("=" * 40)
            logger.info("FREEZE MODE: Training memory + independent lm_head")
            logger.info(" - Trainable: neural_memory, mem_gate, lm_head (untied)")
            logger.info(" - Frozen: embed_tokens, transformer layers, norm")
            logger.info("=" * 40)

    # ---- Distributed wrappers ----
    if is_distributed:
        if config.use_fsdp:
            from functools import partial
            from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, MixedPrecision
            from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
            from transformers.models.qwen3.modeling_qwen3 import Qwen3DecoderLayer

            mp_policy = MixedPrecision(param_dtype=torch_dtype, reduce_dtype=torch_dtype, buffer_dtype=torch_dtype)
            # Shard per decoder layer (both the plain Qwen3 layers and the
            # Titans-augmented wrapper layers).
            auto_wrap = partial(transformer_auto_wrap_policy, transformer_layer_cls={Qwen3DecoderLayer, QwenDecoderLayerWithTitansMemory})

            model = FSDP(
                model,
                auto_wrap_policy=auto_wrap,
                mixed_precision=mp_policy,
                device_id=torch.cuda.current_device(),
                use_orig_params=config.fsdp_use_orig_params,
                # Memory modules keep stateful/fp32 params and are excluded
                # from FSDP sharding.
                ignored_modules=model.get_memory_modules(),
            )
        else:
            model = DDP(
                model,
                device_ids=[local_rank],
                output_device=local_rank,
                find_unused_parameters=config.ddp_find_unused_parameters,
            )
            if config.gradient_checkpointing:
                try:
                    model._set_static_graph()
                    if is_main:
                        logger.warning("DDP static graph enabled for gradient checkpointing.")
                except Exception as e:
                    if is_main:
                        logger.warning(f"DDP static graph enable failed (ignored): {e}")

    trainer = Trainer(
        model=model,
        train_dataloader=train_dataloader,
        eval_dataloader=eval_dataloader,
        config=config,
        rank=rank,
        world_size=world_size,
        is_distributed=is_distributed,
        tokenizer=tokenizer,
    )

    # ---- Eval-only path: load checkpoint, evaluate, exit ----
    if args.eval_only:
        ckpt_path = args.ckpt_path or os.path.join(config.output_dir, config.final_ckpt_name)
        if is_main:
            logger.info(f"eval_only: loading checkpoint: {ckpt_path}")
        ckpt = torch.load(ckpt_path, map_location="cpu")
        has_full = isinstance(ckpt, dict) and ("model_state_dict" in ckpt)
        if has_full:
            full_sd = ckpt["model_state_dict"]
            if is_fsdp_model(model):
                from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
                full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
                with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_cfg):
                    # Only rank 0 holds the real weights; others load empty.
                    sd_to_load = full_sd if is_main else {}
                    model.load_state_dict(sd_to_load, strict=False)
            else:
                unwrap_model(model).load_state_dict(full_sd, strict=False)

        # Memory-only checkpoints may store weights at the top level; filter by name.
        memory_sd = ckpt.get("memory_state_dict", ckpt if isinstance(ckpt, dict) else {})
        memory_sd = {k: v for k, v in memory_sd.items() if ("neural_memory" in k) or ("mem_gate" in k)}
        if len(memory_sd) > 0:
            if is_fsdp_model(model):
                from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
                full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
                with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_cfg):
                    sd_to_load = memory_sd if is_main else {}
                    model.load_state_dict(sd_to_load, strict=False)
            else:
                unwrap_model(model).load_state_dict(memory_sd, strict=False)

        eval_metrics = trainer.evaluate()
        if is_main:
            # Clamp exponent so a diverged loss does not overflow.
            ppl = float(math.exp(min(20.0, eval_metrics["loss"])))
            logger.info(
                f"[EVAL] loss={eval_metrics['loss']:.4f}, ppl≈{ppl:.3f}, "
                f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
            )
        cleanup_distributed()
        return

    trainer.train()
    cleanup_distributed()
1662
+
1663
# Standard script entry-point guard.
if __name__ == "__main__":
    main()
examples/train_qwen_titans_babilong_v2.py ADDED
@@ -0,0 +1,1573 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Qwen3 + Titans training on BABILong QA1 (32k).
3
+
4
+ Key ideas:
5
+ - Fixed-length 32k samples for DDP/FSDP stability.
6
+ - Stream long sequences by chunk (default 8k).
7
+ - Insert Titans memory modules into Qwen layers (stride configurable).
8
+ """
9
+
10
+ import os
11
+ import json
12
+ import math
13
+ import argparse
14
+ import logging
15
+ import weakref
16
+ from contextlib import nullcontext
17
+ from dataclasses import dataclass, asdict
18
+ from typing import Optional, Dict, Any, List, Tuple, Callable
19
+
20
+ import torch
21
+ import torch.nn as nn
22
+ import torch.nn.functional as F
23
+ import torch.distributed as dist
24
+ from torch.utils.data import Dataset, DataLoader
25
+ from torch.optim import AdamW
26
+ from torch.optim.lr_scheduler import CosineAnnealingLR
27
+ from torch.nn.parallel import DistributedDataParallel as DDP
28
+ from tqdm import tqdm
29
+
30
+ from einops import rearrange
31
+
32
+ # add repo root to sys.path
33
+ import sys
34
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
35
+
36
+ # Titans components
37
+ from titans_pytorch import NeuralMemory, MemoryMLP
38
+ from titans_pytorch.neural_memory import NeuralMemState
39
+
40
+ logging.basicConfig(
41
+ level=logging.INFO,
42
+ format="%(asctime)s - %(levelname)s - %(message)s"
43
+ )
44
+ logger = logging.getLogger(__name__)
45
+
46
+
47
@dataclass
class TrainingConfig:
    """All hyperparameters and paths for Qwen3 + Titans BABILong training.

    Fix vs. original: the ``chunkwise_backward`` field was declared twice;
    the duplicate has been removed (behavior is unchanged — the second
    declaration simply shadowed the first with the same default).
    """

    # paths
    model_path: str = "/data/huangyifei/huggingface_cache/hub/models--Qwen--Qwen3-4B-Instruct-2507/snapshots/cdbee75f17c01a7cc42f958dc650907174af0554"
    data_path: str = "/data/yty/BABILong/babilong-train-5k-samples/data/qa1/32k.json"
    output_dir: str = "./outputs/qwen_titans_babilong"

    # training
    num_epochs: int = 10
    batch_size: int = 2
    gradient_accumulation_steps: int = 8
    max_grad_norm: float = 1.0

    # learning rates (separate LR for the new memory modules vs. pretrained weights)
    lr_memory: float = 1e-4
    lr_pretrained: float = 5e-6
    weight_decay: float = 0.01
    warmup_steps: int = 100

    # streaming / memory
    chunk_size: int = 8192
    use_memory: bool = True
    memory_chunk_size: int = 128
    memory_batch_size: int = 128
    memory_heads: int = 8
    memory_dim_head: int = 64
    memory_depth: int = 1
    memory_layer_stride: int = 8
    memory_fp32: bool = True
    detach_mem_state: bool = True

    # evaluation / logging
    eval_steps: int = 200
    eval_topk: int = 0
    logging_steps: int = 10
    log_every_batches: int = 80
    final_eval_print_examples: int = 10
    debug_data_samples: int = 0
    debug_label_batches: int = 0
    debug_eval_stats: bool = False
    debug_grad_norm: bool = False

    # precision
    bf16: bool = True
    fp16: bool = False
    use_tf32: bool = True
    gradient_checkpointing: bool = False
    chunkwise_backward: bool = True

    # data
    max_length: int = 32768
    answer_reserve_tokens: int = 64
    label_prefix_tokens: int = 0
    max_samples: Optional[int] = 500  # 500 for quick experiments; raise for full training

    # fsdp
    use_fsdp: bool = False
    fsdp_use_orig_params: bool = True
    ddp_find_unused_parameters: bool = False

    # checkpoint
    save_full_checkpoint: bool = True
    final_ckpt_name: str = "final_memory_checkpoint.pt"
    final_full_ckpt_name: str = "final_full_checkpoint.pt"

    seed: int = 42
114
+
115
+
116
class BABILongDataset(Dataset):
    """BABILong QA samples rendered as fixed-length causal-LM examples.

    Each raw item (dict with "input", "question", "target") becomes a single
    sequence of exactly ``max_length`` tokens: the prompt
    (input + "Question: ... Answer:") followed by the answer, right-padded.
    Labels are -100 everywhere except the answer span (and, optionally, a
    short prefix of prompt tokens just before it).
    """

    def __init__(
        self,
        data_path: str,
        tokenizer,
        max_length: int = 32768,
        answer_reserve_tokens: int = 64,
        label_prefix_tokens: int = 0,
        max_samples: Optional[int] = None,
    ):
        self.tokenizer = tokenizer
        self.max_length = max_length
        # Tokens held back from the prompt budget so the answer always fits.
        self.answer_reserve_tokens = answer_reserve_tokens
        # Number of prompt tokens immediately before the answer that also
        # receive labels (0 = answer-only supervision).
        self.label_prefix_tokens = int(label_prefix_tokens)

        logger.info(f"Loading dataset: {data_path}")
        with open(data_path, "r") as f:
            self.data = json.load(f)

        if max_samples:
            self.data = self.data[:max_samples]

        logger.info(f"Dataset size: {len(self.data)}")

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Return input_ids / labels / attention_mask, all of length max_length."""
        item = self.data[idx]
        text = f"{item['input']}\n\nQuestion: {item['question']}\nAnswer:"
        target = item["target"]

        # Falls back to 0 when pad_token_id is None (also when it is 0 — harmless).
        pad_id = self.tokenizer.pad_token_id or 0
        reserve = int(self.answer_reserve_tokens)

        # Truncate the prompt so `reserve` tokens remain for the answer.
        prompt_ids = self.tokenizer(
            text,
            max_length=max(self.max_length - reserve, 1),
            truncation=True,
            add_special_tokens=True,
            return_tensors="pt",
        ).input_ids.squeeze(0)

        # Leading space so the answer tokenizes as a continuation.
        answer_ids = self.tokenizer(
            f" {target}",
            add_special_tokens=False,
            return_tensors="pt",
        ).input_ids.squeeze(0)

        # Clip the answer to whatever room the prompt left.
        available = max(self.max_length - prompt_ids.numel(), 0)
        answer_ids = answer_ids[:available]

        input_ids = torch.cat([prompt_ids, answer_ids], dim=0)[: self.max_length]

        # Supervise only the answer span (plus optional prompt prefix).
        labels = torch.full_like(input_ids, fill_value=-100)
        if answer_ids.numel() > 0:
            start = prompt_ids.numel()
            end = min(start + answer_ids.numel(), labels.numel())
            labels[start:end] = input_ids[start:end]
            if self.label_prefix_tokens > 0:
                prefix = min(start, self.label_prefix_tokens)
                if prefix > 0:
                    labels[start - prefix:start] = input_ids[start - prefix:start]

        # Right-pad to a fixed length so DDP/FSDP batches are uniform.
        seq_len = input_ids.numel()
        if seq_len < self.max_length:
            pad_len = self.max_length - seq_len
            input_ids = F.pad(input_ids, (0, pad_len), value=int(pad_id))
            labels = F.pad(labels, (0, pad_len), value=-100)
            attention_mask = torch.cat(
                [torch.ones(seq_len, dtype=torch.long), torch.zeros(pad_len, dtype=torch.long)],
                dim=0,
            )
        else:
            attention_mask = torch.ones(self.max_length, dtype=torch.long)

        return {
            "input_ids": input_ids.to(dtype=torch.long),
            "labels": labels.to(dtype=torch.long),
            "attention_mask": attention_mask,
        }
197
+
198
+
199
def collate_fn(batch):
    """Stack a list of per-sample tensor dicts into one batched tensor dict."""
    first = batch[0]
    collated = {}
    for key in first.keys():
        collated[key] = torch.stack([sample[key] for sample in batch], dim=0)
    return collated
202
+
203
+
204
def _get_raw_dataset_item(dataset, idx: int) -> Optional[Dict[str, Any]]:
    """Resolve the raw dict backing ``dataset[idx]``, unwrapping one Subset level.

    Returns None when the underlying dataset is not a BABILongDataset or the
    lookup fails for any reason.
    """
    if isinstance(dataset, torch.utils.data.Subset):
        underlying = dataset.dataset
        real_idx = dataset.indices[idx]
    else:
        underlying = dataset
        real_idx = idx

    if not (isinstance(underlying, BABILongDataset) and hasattr(underlying, "data")):
        return None
    try:
        return underlying.data[real_idx]
    except Exception:
        return None
216
+
217
+
218
def log_dataset_debug_stats(dataset, tokenizer, name: str, num_samples: int) -> None:
    """Log label/attention statistics for the first ``num_samples`` items.

    For the first up-to-3 samples, also logs the label span, the decoded
    answer text, and (when the raw item is reachable) the target's
    character/token counts. No-op when ``num_samples <= 0``.
    """
    if num_samples <= 0:
        return
    total = len(dataset)
    if total <= 0:
        logger.warning(f"[DATA DEBUG] {name}: empty dataset")
        return

    n = min(int(num_samples), total)
    zero_label = 0
    total_label_tokens = 0
    total_loss_tokens = 0
    total_attn_tokens = 0

    for i in range(n):
        sample = dataset[i]
        labels = sample["labels"]
        attn = sample["attention_mask"]

        label_mask = labels != -100
        label_tokens = int(label_mask.sum().item())
        # Loss tokens exclude position 0 (shifted next-token prediction).
        loss_tokens = int((labels[1:] != -100).sum().item()) if labels.numel() > 1 else 0
        attn_tokens = int(attn.sum().item())

        total_label_tokens += label_tokens
        total_loss_tokens += loss_tokens
        total_attn_tokens += attn_tokens
        if label_tokens == 0:
            zero_label += 1

        # Detailed per-sample dump only for the first few samples.
        if i < min(3, n):
            label_pos = label_mask.nonzero(as_tuple=False).view(-1)
            first_label = int(label_pos[0].item()) if label_pos.numel() > 0 else -1
            last_label = int(label_pos[-1].item()) if label_pos.numel() > 0 else -1

            decoded = ""
            if tokenizer is not None and label_pos.numel() > 0:
                answer_ids = labels[label_pos].tolist()
                decoded = tokenizer.decode(answer_ids, skip_special_tokens=True).strip()
                if len(decoded) > 200:
                    decoded = decoded[:200] + "..."

            raw_item = _get_raw_dataset_item(dataset, i)
            target_chars = None
            target_tokens = None
            if raw_item is not None and tokenizer is not None:
                target_text = str(raw_item.get("target", ""))
                target_chars = len(target_text)
                # Leading space mirrors how the dataset tokenizes the answer.
                target_ids = tokenizer(
                    f" {target_text}",
                    add_special_tokens=False,
                    return_tensors="pt",
                ).input_ids.squeeze(0)
                target_tokens = int(target_ids.numel())

            logger.info(
                f"[DATA DEBUG] {name} sample {i}: attn_tokens={attn_tokens}, "
                f"label_tokens={label_tokens}, loss_tokens={loss_tokens}, "
                f"label_span=[{first_label},{last_label}]"
            )
            if target_chars is not None or decoded:
                logger.info(
                    f"[DATA DEBUG] {name} target_chars={target_chars}, "
                    f"target_tokens={target_tokens}, decoded_answer={repr(decoded)}"
                )

    avg_label = total_label_tokens / max(n, 1)
    avg_loss = total_loss_tokens / max(n, 1)
    avg_attn = total_attn_tokens / max(n, 1)
    logger.info(
        f"[DATA DEBUG] {name} summary: samples={n}, zero_label_samples={zero_label}, "
        f"avg_label_tokens={avg_label:.2f}, avg_loss_tokens={avg_loss:.2f}, avg_attn_tokens={avg_attn:.2f}"
    )
291
+
292
+
293
+ class QwenDecoderLayerWithTitansMemory(nn.Module):
294
def __init__(
    self,
    base_layer: nn.Module,
    *,
    hidden_size: int,
    chunk_size: int,
    batch_size: int,
    dim_head: int,
    num_heads: int,
    memory_depth: int,
    memory_fp32: bool,
    detach_mem_state: bool,
    parent_model: Optional[nn.Module] = None,
):
    """Wrap a Qwen decoder layer with a Titans NeuralMemory + sigmoid gate.

    Args:
        base_layer: the original Qwen3 decoder layer being wrapped.
        hidden_size: model hidden dimension (memory input/output width).
        chunk_size / batch_size: NeuralMemory chunking parameters.
        dim_head / num_heads: memory head geometry.
        memory_depth: depth of the inner MemoryMLP.
        memory_fp32: keep the memory module in float32 regardless of the
            base layer's dtype.
        detach_mem_state: whether the carried memory state is detached
            between chunks (consumed by forward).
        parent_model: optional owner model, held via weakref so the wrapper
            can read its `_mem_store_mask` without a reference cycle.
    """
    super().__init__()
    self.layer = base_layer
    self.memory_fp32 = memory_fp32
    self.detach_mem_state = bool(detach_mem_state)
    # Carried NeuralMemState across chunks; None means "start fresh".
    self.memory_state: Optional[NeuralMemState] = None
    # weakref avoids a parent<->child reference cycle.
    self.parent_model_ref = weakref.ref(parent_model) if parent_model is not None else None

    memory_model = MemoryMLP(
        dim=dim_head,
        depth=memory_depth,
        expansion_factor=2.0,
    )

    self.neural_memory = NeuralMemory(
        dim=hidden_size,
        chunk_size=chunk_size,
        batch_size=batch_size,
        dim_head=dim_head,
        heads=num_heads,
        model=memory_model,
        momentum=True,
        momentum_order=1,
        qk_rmsnorm=True,
        pre_rmsnorm=True,
        default_step_transform_max_lr=1e-2,
        init_adaptive_step_bias=-6.0,
        max_grad_norm=1.0,
        spectral_norm_surprises=True,
        use_accelerated_scan=False,
    )

    # Gate mixing memory output with hidden states; input is the
    # concatenation of both, hence hidden_size * 2.
    self.mem_gate = nn.Sequential(
        nn.Linear(hidden_size * 2, hidden_size),
        nn.Sigmoid(),
    )

    # Match device/dtype of the wrapped layer (StopIteration: layer has no
    # parameters, e.g. under deferred initialization — leave placement as-is).
    try:
        layer_device = next(base_layer.parameters()).device
        layer_dtype = next(base_layer.parameters()).dtype
    except StopIteration:
        layer_device = None
        layer_dtype = None

    if layer_device is not None:
        mem_dtype = torch.float32 if memory_fp32 else layer_dtype
        self.neural_memory = self.neural_memory.to(device=layer_device, dtype=mem_dtype)
        if layer_dtype is not None:
            self.mem_gate = self.mem_gate.to(device=layer_device, dtype=layer_dtype)
        else:
            # Effectively unreachable: layer_device and layer_dtype are set together.
            self.mem_gate = self.mem_gate.to(device=layer_device)
358
+
359
+ def reset_memory_state(self):
360
+ self.memory_state = None
361
+
362
+ def _get_store_mask(self, hidden_states: torch.Tensor) -> Optional[torch.Tensor]:
363
+ parent_model = self.parent_model_ref() if self.parent_model_ref is not None else None
364
+ if parent_model is None or not hasattr(parent_model, "_mem_store_mask"):
365
+ return None
366
+ store_mask = getattr(parent_model, "_mem_store_mask")
367
+ if store_mask is None:
368
+ return None
369
+ store_mask = store_mask.to(device=hidden_states.device).bool()
370
+ if store_mask.shape[:2] != hidden_states.shape[:2]:
371
+ return None
372
+ return store_mask
373
+
374
+ def forward(self, *args, **kwargs):
375
+ outputs = self.layer(*args, **kwargs)
376
+
377
+ if isinstance(outputs, (tuple, list)):
378
+ hidden_states = outputs[0]
379
+ rest = outputs[1:]
380
+ else:
381
+ hidden_states = outputs
382
+ rest = None
383
+
384
+ full_store_mask = self._get_store_mask(hidden_states)
385
+ mem_inp = hidden_states.float() if self.memory_fp32 else hidden_states
386
+
387
+ store_seq = None
388
+ store_mask = full_store_mask
389
+ if store_mask is not None:
390
+ store_seq = mem_inp
391
+ if store_mask.shape[1] > 0 and not store_mask[:, 0].any():
392
+ store_seq = store_seq[:, 1:]
393
+ store_mask = store_mask[:, 1:]
394
+
395
+ store_chunk = self.neural_memory.store_chunk_size
396
+ remainder = store_seq.shape[1] % store_chunk
397
+ if remainder != 0:
398
+ store_seq = store_seq[:, :-remainder]
399
+ store_mask = store_mask[:, :-remainder]
400
+
401
+ if store_mask is not None and store_seq is not None:
402
+ if store_mask.shape[1] != store_seq.shape[1]:
403
+ min_len = min(store_mask.shape[1], store_seq.shape[1])
404
+ store_seq = store_seq[:, :min_len]
405
+ store_mask = store_mask[:, :min_len]
406
+
407
+ if store_seq.shape[1] == 0:
408
+ store_seq = None
409
+ store_mask = None
410
+
411
+ mem_ctx = (
412
+ torch.amp.autocast(device_type=hidden_states.device.type, enabled=False)
413
+ if self.memory_fp32
414
+ else nullcontext()
415
+ )
416
+ with mem_ctx:
417
+ retrieved, next_state = self.neural_memory(
418
+ mem_inp,
419
+ store_seq=store_seq,
420
+ state=self.memory_state,
421
+ store_mask=store_mask,
422
+ detach_mem_state=self.detach_mem_state,
423
+ )
424
+ self.memory_state = next_state
425
+
426
+ if retrieved is not None:
427
+ retrieved = retrieved.to(dtype=hidden_states.dtype)
428
+ if full_store_mask is not None and full_store_mask.shape[:2] == retrieved.shape[:2]:
429
+ retrieved = retrieved * full_store_mask.unsqueeze(-1).to(dtype=retrieved.dtype)
430
+ gate = self.mem_gate(torch.cat([hidden_states, retrieved], dim=-1))
431
+ hidden_states = hidden_states + gate * retrieved
432
+
433
+ if rest is None:
434
+ return hidden_states
435
+ return (hidden_states, *rest)
436
+
437
+
438
class QwenTitansForBABILong(nn.Module):
    """Qwen causal LM wrapped for BABILong: chunked processing with Titans memory.

    Every ``memory_layer_stride``-th decoder layer is replaced by a
    ``QwenDecoderLayerWithTitansMemory`` wrapper.  Long inputs are processed in
    ``config.chunk_size`` chunks; the memory layers carry state across chunks,
    and the loss is computed per-chunk against the answer tokens only
    (labels != -100).
    """

    def __init__(self, qwen_model, config: TrainingConfig):
        super().__init__()
        self.qwen = qwen_model
        self.config = config
        self.hidden_size = qwen_model.config.hidden_size
        self.use_memory = bool(getattr(config, "use_memory", True))

        if self.use_memory:
            self.memory_layer_stride = int(getattr(config, "memory_layer_stride", 4))
            self.memory_layer_indices = [
                idx for idx in range(len(self.qwen.model.layers)) if idx % self.memory_layer_stride == 0
            ]

            # Replace the selected decoder layers in place with memory wrappers.
            for layer_idx in self.memory_layer_indices:
                base_layer = self.qwen.model.layers[layer_idx]
                wrapped = QwenDecoderLayerWithTitansMemory(
                    base_layer,
                    hidden_size=self.hidden_size,
                    chunk_size=config.memory_chunk_size,
                    batch_size=config.memory_batch_size,
                    dim_head=config.memory_dim_head,
                    num_heads=config.memory_heads,
                    memory_depth=config.memory_depth,
                    memory_fp32=config.memory_fp32,
                    detach_mem_state=config.detach_mem_state,
                    parent_model=self.qwen.model,
                )
                self.qwen.model.layers[layer_idx] = wrapped
        else:
            self.memory_layer_stride = 0
            self.memory_layer_indices = []

        if self.use_memory:
            logger.info("[QwenTitansForBABILong] Initialized")
            logger.info(f" - hidden_size: {self.hidden_size}")
            logger.info(f" - chunk_size: {config.chunk_size}")
            logger.info(f" - memory_layer_stride: {self.memory_layer_stride}")
            logger.info(f" - memory_layers: {self.memory_layer_indices}")
        else:
            logger.info("[QwenTitansForBABILong] Initialized (memory disabled)")
            logger.info(f" - hidden_size: {self.hidden_size}")
            logger.info(f" - chunk_size: {config.chunk_size}")

        self._memory_layers = [
            layer for layer in self.qwen.model.layers if isinstance(layer, QwenDecoderLayerWithTitansMemory)
        ]
        # Side channel read by the memory wrappers (see _set_mem_store_mask).
        self.qwen.model._mem_store_mask = None

    def _split_into_chunks(self, tensor, chunk_size):
        """Split along dim 1 into (start, end, slice) triples of at most chunk_size tokens."""
        seq_len = tensor.shape[1]
        chunks = []
        for start in range(0, seq_len, chunk_size):
            end = min(start + chunk_size, seq_len)
            chunks.append((start, end, tensor[:, start:end]))
        return chunks

    def reset_memory_states(self):
        """Clear the carried memory state in every memory-wrapped layer."""
        for layer in self._memory_layers:
            layer.reset_memory_state()

    def _set_mem_store_mask(
        self,
        chunk_ids: torch.Tensor,
        chunk_mask: Optional[torch.Tensor],
        chunk_start: int,
    ) -> None:
        """Publish the per-token store mask for the upcoming chunk.

        Chunks after the first include one overlap token at position 0 (for the
        next-token loss shift); it is masked out so it is not stored twice.
        """
        if not self.use_memory:
            self.qwen.model._mem_store_mask = None
            return
        if chunk_mask is None:
            if chunk_start > 0:
                store_mask = torch.ones_like(chunk_ids, dtype=torch.bool)
                store_mask[:, 0] = False
            else:
                store_mask = None
        else:
            store_mask = chunk_mask.to(device=chunk_ids.device).bool()
            if chunk_start > 0:
                store_mask[:, 0] = False
        self.qwen.model._mem_store_mask = store_mask

    def get_memory_modules(self) -> List[nn.Module]:
        """Return all trainable Titans additions (neural memories and gates)."""
        if not self._memory_layers:
            return []
        modules = []
        for layer in self._memory_layers:
            modules.append(layer.neural_memory)
            modules.append(layer.mem_gate)
        return modules

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        return_pred_tokens: bool = False,
        topk: int = 0,
        chunkwise_backward: bool = False,
        loss_scale: Optional[float] = None,
        backward_fn: Optional[Callable[[torch.Tensor], None]] = None,
        chunk_start: Optional[int] = None,
        chunk_end: Optional[int] = None,
        reset_mem_state: bool = False,
    ) -> Dict[str, torch.Tensor]:
        """Chunked forward pass.

        When chunk_start/chunk_end is given, process exactly one chunk (the
        caller drives the chunk loop, e.g. for chunkwise backward) and return
        {"loss_sum", "loss_tokens", "has_grad"}.  Otherwise iterate over all
        chunks internally and return a token-averaged {"loss"} plus optional
        prediction/top-k bookkeeping.
        """
        if chunk_start is not None or chunk_end is not None:
            start = 0 if chunk_start is None else int(chunk_start)
            end = int(chunk_end) if chunk_end is not None else None
            return self._forward_single_chunk(
                input_ids=input_ids,
                attention_mask=attention_mask,
                labels=labels,
                chunk_start=start,
                chunk_end=end,
                reset_mem_state=reset_mem_state,
            )
        batch_size, seq_len = input_ids.shape
        chunk_size = self.config.chunk_size
        chunks = self._split_into_chunks(input_ids, chunk_size)

        self.reset_memory_states()
        # Sum-reduction so the final average weights every token equally
        # regardless of how tokens are distributed across chunks.
        loss_fct_sum = nn.CrossEntropyLoss(reduction="sum")
        total_loss_sum = None
        total_loss_tokens = 0
        topk_correct = None
        topk_total = None

        pred_tokens_by_sample: List[List[int]] = [[] for _ in range(batch_size)]
        target_tokens_by_sample: List[List[int]] = [[] for _ in range(batch_size)]
        if topk and topk > 0:
            device = input_ids.device
            topk_correct = torch.tensor(0.0, device=device, dtype=torch.float32)
            topk_total = torch.tensor(0.0, device=device, dtype=torch.float32)

        for start, end, _ in chunks:
            # Overlap one token with the previous chunk so the shifted
            # next-token targets at the chunk boundary still have a prediction.
            proc_start = max(0, start - 1)
            chunk_ids = input_ids[:, proc_start:end]
            chunk_labels = labels[:, proc_start:end] if labels is not None else None
            chunk_mask = attention_mask[:, proc_start:end] if attention_mask is not None else None

            self._set_mem_store_mask(chunk_ids, chunk_mask, start)
            hidden_full = self._process_chunk(chunk_ids, chunk_mask)
            if self.use_memory:
                self.qwen.model._mem_store_mask = None

            if chunk_labels is not None and (chunk_labels != -100).any():
                chunk_labels_local = chunk_labels.to(device=hidden_full.device)
                shift_hidden = hidden_full[:, :-1, :].contiguous()
                shift_labels = chunk_labels_local[:, 1:].contiguous()

                valid = shift_labels != -100
                if valid.any():
                    # Run the LM head only on supervised positions.
                    hs = shift_hidden[valid]
                    targets = shift_labels[valid]

                    hs = torch.nan_to_num(hs.float(), nan=0.0, posinf=0.0, neginf=0.0)

                    logits = self.qwen.lm_head(hs)
                    logits = logits.float()
                    logits = torch.nan_to_num(logits, nan=0.0, posinf=0.0, neginf=0.0)
                    targets = targets.to(device=logits.device)

                    chunk_loss_sum = loss_fct_sum(logits, targets)
                    if total_loss_sum is None:
                        total_loss_sum = chunk_loss_sum
                    else:
                        total_loss_sum = total_loss_sum + chunk_loss_sum
                    total_loss_tokens += targets.numel()

                    if topk and topk > 0:
                        k = min(int(topk), logits.shape[-1])
                        topk_ids = torch.topk(logits, k=k, dim=-1).indices
                        correct = (topk_ids == targets.unsqueeze(-1)).any(dim=-1)
                        topk_correct = topk_correct + correct.float().sum()
                        topk_total = topk_total + torch.tensor(float(targets.numel()), device=topk_total.device)

                    if return_pred_tokens:
                        # Regroup flat (masked-select) predictions back per sample.
                        idx = valid.nonzero(as_tuple=False)
                        pred_flat = torch.argmax(logits, dim=-1).detach().to("cpu", dtype=torch.long).tolist()
                        tgt_flat = targets.detach().to("cpu", dtype=torch.long).tolist()
                        b_idx_flat = idx[:, 0].detach().to("cpu", dtype=torch.long).tolist()

                        for i, b_idx in enumerate(b_idx_flat):
                            pred_tokens_by_sample[b_idx].append(int(pred_flat[i]))
                            target_tokens_by_sample[b_idx].append(int(tgt_flat[i]))

        if total_loss_sum is None or total_loss_tokens == 0:
            device = next(self.qwen.parameters()).device
            loss = torch.zeros((), device=device, dtype=torch.float32)
        else:
            loss = total_loss_sum / total_loss_tokens

        out: Dict[str, torch.Tensor] = {"loss": loss}
        if return_pred_tokens:
            # Pad ragged per-sample token lists into -1-filled matrices.
            lengths = torch.tensor([len(x) for x in target_tokens_by_sample], dtype=torch.long)
            max_len = int(lengths.max().item()) if lengths.numel() > 0 else 0
            if max_len > 0:
                pred_mat = torch.full((batch_size, max_len), -1, dtype=torch.long)
                tgt_mat = torch.full((batch_size, max_len), -1, dtype=torch.long)
                for b in range(batch_size):
                    L = int(lengths[b].item())
                    if L > 0:
                        pred_mat[b, :L] = torch.tensor(pred_tokens_by_sample[b], dtype=torch.long)
                        tgt_mat[b, :L] = torch.tensor(target_tokens_by_sample[b], dtype=torch.long)
            else:
                pred_mat = torch.empty((batch_size, 0), dtype=torch.long)
                tgt_mat = torch.empty((batch_size, 0), dtype=torch.long)
            out["pred_ids"] = pred_mat
            out["target_ids"] = tgt_mat
            out["target_lengths"] = lengths
        if topk and topk > 0 and topk_correct is not None and topk_total is not None:
            out["topk_correct"] = topk_correct
            out["topk_total"] = topk_total
        return out

    def _forward_single_chunk(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor],
        labels: Optional[torch.Tensor],
        chunk_start: int,
        chunk_end: Optional[int],
        reset_mem_state: bool,
    ) -> Dict[str, torch.Tensor]:
        """Process one chunk (caller-driven loop); returns an unreduced loss sum."""
        if reset_mem_state:
            self.reset_memory_states()

        seq_len = input_ids.shape[1]
        end = chunk_end if chunk_end is not None else min(chunk_start + self.config.chunk_size, seq_len)
        end = min(int(end), seq_len)
        start = max(0, int(chunk_start))

        # Same one-token overlap as the full forward (see forward()).
        proc_start = max(0, start - 1)
        chunk_ids = input_ids[:, proc_start:end]
        chunk_labels = labels[:, proc_start:end] if labels is not None else None
        chunk_mask = attention_mask[:, proc_start:end] if attention_mask is not None else None

        self._set_mem_store_mask(chunk_ids, chunk_mask, start)
        hidden_full = self._process_chunk(chunk_ids, chunk_mask)
        if self.use_memory:
            self.qwen.model._mem_store_mask = None

        loss_fct_sum = nn.CrossEntropyLoss(reduction="sum")
        total_loss_sum = None
        total_loss_tokens = 0

        if chunk_labels is not None and (chunk_labels != -100).any():
            chunk_labels_local = chunk_labels.to(device=hidden_full.device)
            shift_hidden = hidden_full[:, :-1, :].contiguous()
            shift_labels = chunk_labels_local[:, 1:].contiguous()

            valid = shift_labels != -100
            if valid.any():
                hs = shift_hidden[valid]
                targets = shift_labels[valid]

                hs = torch.nan_to_num(hs.float(), nan=0.0, posinf=0.0, neginf=0.0)
                logits = self.qwen.lm_head(hs)
                logits = logits.float()
                logits = torch.nan_to_num(logits, nan=0.0, posinf=0.0, neginf=0.0)
                targets = targets.to(device=logits.device)

                total_loss_sum = loss_fct_sum(logits, targets)
                total_loss_tokens = targets.numel()

        if total_loss_sum is None:
            # Build a zero loss that still has a grad graph (connected through
            # hidden_full).  Critical for DDP: every rank must run the same
            # sequence of backward calls so allreduce stays in sync.
            total_loss_sum = (hidden_full.float().sum() * 0.0)

        return {
            "loss_sum": total_loss_sum,
            "loss_tokens": total_loss_tokens,
            "has_grad": True,  # a grad graph is now always present
        }

    def _process_chunk(
        self,
        chunk_ids: torch.Tensor,
        chunk_attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Embed a chunk and run the decoder stack; returns last hidden states."""
        if hasattr(self.qwen.model, "embed_tokens"):
            token_embeds = self.qwen.model.embed_tokens(chunk_ids)
        else:
            token_embeds = self.qwen.get_input_embeddings()(chunk_ids)

        # use_cache disabled: memory layers carry state instead of a KV cache.
        outputs = self.qwen.model(
            inputs_embeds=token_embeds,
            attention_mask=chunk_attention_mask,
            use_cache=False,
            output_hidden_states=False,
            return_dict=True,
        )
        return outputs.last_hidden_state

    def get_param_groups(self, lr_memory: float, lr_pretrained: float, weight_decay: float):
        """Split trainable params into memory-module vs pretrained optimizer groups."""
        memory_params = []
        pretrained_params = []

        for name, param in self.named_parameters():
            if not param.requires_grad:
                continue
            if "neural_memory" in name or "mem_gate" in name:
                memory_params.append(param)
            else:
                pretrained_params.append(param)

        param_groups = []
        if len(memory_params) > 0:
            param_groups.append(
                {"params": memory_params, "lr": lr_memory, "weight_decay": weight_decay, "name": "memory_module"}
            )
        if len(pretrained_params) > 0:
            param_groups.append(
                {"params": pretrained_params, "lr": lr_pretrained, "weight_decay": weight_decay, "name": "pretrained"}
            )
        logger.info(f"Param groups: memory={len(memory_params)}, pretrained={len(pretrained_params)}")
        return param_groups
756
+
757
+
758
def init_distributed() -> tuple[bool, int, int, int]:
    """Initialise torch.distributed from torchrun env vars.

    Returns (is_distributed, rank, local_rank, world_size); when RANK or
    WORLD_SIZE is absent we are running single-process and return
    (False, 0, 0, 1) without touching the process group.
    """
    env = os.environ
    if "RANK" not in env or "WORLD_SIZE" not in env:
        return False, 0, 0, 1

    rank = int(env["RANK"])
    world_size = int(env["WORLD_SIZE"])
    local_rank = int(env.get("LOCAL_RANK", 0))

    if not dist.is_available():
        raise RuntimeError("torch.distributed not available")
    if not dist.is_initialized():
        dist.init_process_group(backend="nccl", init_method="env://")

    # Pin this process to its local GPU before any CUDA work happens.
    torch.cuda.set_device(local_rank)
    return True, rank, local_rank, world_size
774
+
775
+
776
def cleanup_distributed():
    """Synchronise all ranks, then tear down the process group (no-op otherwise)."""
    if not (dist.is_available() and dist.is_initialized()):
        return
    dist.barrier()
    dist.destroy_process_group()
780
+
781
+
782
def unwrap_model(model: nn.Module) -> nn.Module:
    """Return the module wrapped by DDP/FSDP, or ``model`` itself if unwrapped."""
    # DDP (and FSDP) expose the inner module as `.module`.
    try:
        return model.module
    except AttributeError:
        pass
    # Older FSDP layouts keep it one level deeper.
    inner = getattr(model, "_fsdp_wrapped_module", None)
    if inner is not None and hasattr(inner, "module"):
        return inner.module
    return model
790
+
791
+
792
def is_fsdp_model(model: nn.Module) -> bool:
    """True iff ``model`` is an FSDP wrapper; False when FSDP cannot be imported."""
    try:
        from torch.distributed.fsdp import FullyShardedDataParallel
    except Exception:
        return False
    return isinstance(model, FullyShardedDataParallel)
798
+
799
+
800
+ class Trainer:
801
+ def __init__(
802
+ self,
803
+ model: QwenTitansForBABILong,
804
+ train_dataloader: DataLoader,
805
+ eval_dataloader: DataLoader,
806
+ config: TrainingConfig,
807
+ rank: int = 0,
808
+ world_size: int = 1,
809
+ is_distributed: bool = False,
810
+ tokenizer=None,
811
+ ):
812
+ self.model = model
813
+ self.train_dataloader = train_dataloader
814
+ self.eval_dataloader = eval_dataloader
815
+ self.config = config
816
+ self.device = next(model.parameters()).device
817
+ self.rank = rank
818
+ self.world_size = world_size
819
+ self.is_distributed = is_distributed
820
+ self.is_main_process = (rank == 0)
821
+ self.tokenizer = tokenizer
822
+
823
+ base_model = unwrap_model(self.model)
824
+ param_groups = base_model.get_param_groups(
825
+ lr_memory=config.lr_memory,
826
+ lr_pretrained=config.lr_pretrained,
827
+ weight_decay=config.weight_decay,
828
+ )
829
+ self.optimizer = AdamW(param_groups)
830
+
831
+ total_steps = math.ceil(
832
+ (len(train_dataloader) * config.num_epochs) / max(config.gradient_accumulation_steps, 1)
833
+ )
834
+ self.scheduler = CosineAnnealingLR(self.optimizer, T_max=total_steps, eta_min=1e-7)
835
+
836
+ self.scaler = torch.cuda.amp.GradScaler(enabled=config.fp16)
837
+ self.global_step = 0
838
+
839
+ def _get_group_lr(self, group_name: str) -> Optional[float]:
840
+ for group in self.optimizer.param_groups:
841
+ if group.get("name") == group_name:
842
+ return group.get("lr")
843
+ return None
844
+
845
    def train(self):
        """Main training loop.

        Supports two modes: a plain full-sequence forward, and
        ``chunkwise_backward`` where each chunk is forwarded and backwarded
        separately (bounding activation memory for long sequences).  In both
        modes, DDP gradient sync is suppressed with ``no_sync`` until the last
        backward of each accumulation window.
        """
        self.model.train()
        if self.is_main_process:
            logger.info("Start training")

        last_epoch_loss = None
        for epoch in range(self.config.num_epochs):
            # DistributedSampler needs the epoch to reshuffle deterministically.
            sampler = getattr(self.train_dataloader, "sampler", None)
            if sampler is not None and hasattr(sampler, "set_epoch"):
                sampler.set_epoch(epoch)
            if self.is_main_process:
                logger.info(f"Epoch {epoch + 1}/{self.config.num_epochs}")

            epoch_loss = 0.0
            num_batches = 0

            pbar = self.train_dataloader
            if self.is_main_process:
                pbar = tqdm(
                    self.train_dataloader,
                    desc=f"Epoch {epoch + 1}/{self.config.num_epochs}",
                    leave=False,
                    dynamic_ncols=True,
                )
            for step, batch in enumerate(pbar):
                batch = {k: v.to(self.device) for k, v in batch.items()}
                # Optional label-coverage debug print for the first few batches.
                if (
                    self.config.debug_label_batches > 0
                    and self.is_main_process
                    and step < int(self.config.debug_label_batches)
                ):
                    labels = batch.get("labels")
                    if labels is not None:
                        label_tokens = int((labels != -100).sum().item())
                        loss_tokens = int((labels[:, 1:] != -100).sum().item()) if labels.size(1) > 1 else 0
                        attn_tokens = int(batch["attention_mask"].sum().item())
                        logger.info(
                            f"[BATCH DEBUG] epoch={epoch + 1} step={step + 1}: "
                            f"attn_tokens={attn_tokens}, label_tokens={label_tokens}, loss_tokens={loss_tokens}"
                        )
                    else:
                        logger.info(f"[BATCH DEBUG] epoch={epoch + 1} step={step + 1}: labels missing")

                ga = max(self.config.gradient_accumulation_steps, 1)
                sync_gradients = ((step + 1) % ga == 0)
                amp_enabled = self.config.fp16 or self.config.bf16
                amp_dtype = torch.float16 if self.config.fp16 else torch.bfloat16
                with torch.amp.autocast(device_type=self.device.type, enabled=amp_enabled, dtype=amp_dtype):
                    if self.config.chunkwise_backward:
                        # Token-count normalisation is computed up front so each
                        # chunk's backward can be scaled independently.
                        labels = batch.get("labels")
                        if labels is not None:
                            total_tokens = int((labels[:, 1:] != -100).sum().item())
                        else:
                            total_tokens = 0
                        loss_scale = 0.0 if total_tokens == 0 else (1.0 / total_tokens / ga)

                        seq_len = batch["input_ids"].shape[1]
                        chunk_size = int(self.config.chunk_size)
                        chunk_ranges = [
                            (start, min(start + chunk_size, seq_len))
                            for start in range(0, seq_len, chunk_size)
                        ]
                        raw_loss_sum = None

                        for idx, (start, end) in enumerate(chunk_ranges):
                            # Only the very last backward of the accumulation
                            # window may trigger DDP's gradient allreduce.
                            is_last_chunk = (idx == len(chunk_ranges) - 1)
                            sync_chunk = sync_gradients and is_last_chunk
                            chunk_ctx = (
                                self.model.no_sync
                                if (self.is_distributed and not sync_chunk)
                                else nullcontext
                            )
                            with chunk_ctx():
                                outputs = self.model(
                                    input_ids=batch["input_ids"],
                                    attention_mask=batch["attention_mask"],
                                    labels=labels,
                                    chunk_start=start,
                                    chunk_end=end,
                                    reset_mem_state=(idx == 0),
                                )
                                chunk_loss_sum = outputs["loss_sum"]
                                chunk_loss_tokens = int(outputs.get("loss_tokens", 0))
                                if raw_loss_sum is None:
                                    raw_loss_sum = chunk_loss_sum.detach()
                                else:
                                    raw_loss_sum = raw_loss_sum + chunk_loss_sum.detach()

                                # DDP-critical: every rank must execute the same
                                # sequence of backward calls.  Even when
                                # loss_scale == 0 or the chunk has no valid
                                # tokens, backward still runs so the allreduce
                                # stays in sync across ranks.
                                scaled_loss = chunk_loss_sum * float(loss_scale)
                                if self.config.fp16:
                                    self.scaler.scale(scaled_loss).backward()
                                else:
                                    scaled_loss.backward()

                        if raw_loss_sum is None or total_tokens == 0:
                            raw_loss = torch.zeros((), device=self.device, dtype=torch.float32)
                        else:
                            raw_loss = raw_loss_sum / total_tokens
                        loss = raw_loss / ga
                    else:
                        # Whole-sequence forward; backward stays inside the
                        # (possibly no_sync) context.
                        ctx = self.model.no_sync if (self.is_distributed and not sync_gradients) else nullcontext
                        with ctx():
                            outputs = self.model(
                                input_ids=batch["input_ids"],
                                attention_mask=batch["attention_mask"],
                                labels=batch["labels"],
                            )
                            raw_loss = outputs["loss"]
                            loss = raw_loss / ga

                            if self.config.fp16:
                                self.scaler.scale(loss).backward()
                            else:
                                loss.backward()

                epoch_loss += raw_loss.detach().float().item()
                num_batches += 1

                if sync_gradients:
                    # Unscale before clipping so max_grad_norm applies to true grads.
                    grad_norm = None
                    if self.config.fp16:
                        self.scaler.unscale_(self.optimizer)
                        grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.max_grad_norm)
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                    else:
                        grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.max_grad_norm)
                        self.optimizer.step()

                    self.scheduler.step()
                    self.optimizer.zero_grad(set_to_none=True)
                    self.global_step += 1

                    if self.is_main_process:
                        avg_loss = epoch_loss / max(num_batches, 1)
                        pbar.set_postfix(
                            {
                                "gstep": self.global_step,
                                "loss": f"{avg_loss:.4f}",
                            }
                        )

                    if self.global_step % self.config.logging_steps == 0 and self.is_main_process:
                        lr_mem = self._get_group_lr("memory_module")
                        lr_pre = self._get_group_lr("pretrained")
                        if lr_pre is None and self.optimizer.param_groups:
                            lr_pre = self.optimizer.param_groups[0]["lr"]
                        grad_note = ""
                        if self.config.debug_grad_norm and grad_norm is not None:
                            grad_note = f" | grad_norm={float(grad_norm):.4f}"
                        if lr_mem is None:
                            lr_label = f"lr={lr_pre:.2e}" if lr_pre is not None else "lr=NA"
                            logger.info(
                                f"Step {self.global_step} | loss={epoch_loss / max(num_batches, 1):.4f} | "
                                f"{lr_label}{grad_note}"
                            )
                        else:
                            logger.info(
                                f"Step {self.global_step} | loss={epoch_loss / max(num_batches, 1):.4f} | "
                                f"lr_mem={lr_mem:.2e} | lr_pre={lr_pre:.2e}{grad_note}"
                            )

                    if self.global_step % self.config.eval_steps == 0:
                        eval_metrics = self.evaluate()
                        if self.is_main_process:
                            logger.info(
                                f"Step {self.global_step}: "
                                f"eval_loss={eval_metrics['loss']:.4f}, "
                                f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                                f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
                            )
                        self.model.train()

            # Average the per-rank epoch losses across the world.
            avg_epoch_loss = epoch_loss / max(num_batches, 1)
            if self.is_distributed:
                t = torch.tensor(avg_epoch_loss, device=self.device, dtype=torch.float32)
                dist.all_reduce(t, op=dist.ReduceOp.SUM)
                avg_epoch_loss = (t / self.world_size).item()

            if self.is_main_process:
                logger.info(f"Epoch {epoch + 1} done, avg loss={avg_epoch_loss:.4f}")
            last_epoch_loss = avg_epoch_loss

            eval_metrics = self.evaluate()
            if self.is_main_process:
                logger.info(
                    f"[EPOCH {epoch + 1} EVAL] "
                    f"eval_loss={eval_metrics['loss']:.4f}, "
                    f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                    f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
                )
                self._append_eval_metrics(
                    eval_metrics,
                    phase="epoch",
                    epoch=int(epoch + 1),
                    train_avg_loss=avg_epoch_loss,
                )
            self.model.train()

        if self.is_main_process:
            logger.info("Training done, final evaluation")

        final_eval = self.evaluate(print_examples=int(self.config.final_eval_print_examples))
        if self.is_main_process:
            # Clamp before exp so a diverged loss cannot overflow the ppl print.
            ppl = float(math.exp(min(20.0, final_eval["loss"])))
            logger.info(
                f"[FINAL EVAL] loss={final_eval['loss']:.4f}, ppl≈{ppl:.3f}, "
                f"em_acc={final_eval['em_acc'] * 100:.2f}%, "
                f"tok_acc={final_eval['tok_acc'] * 100:.2f}%"
            )
            logger.info("Saving final checkpoint")
            self._append_eval_metrics(
                final_eval,
                phase="final",
                epoch=int(self.config.num_epochs),
                train_avg_loss=last_epoch_loss,
            )
        self.save_final_checkpoint()
1066
+
1067
    @torch.no_grad()
    def evaluate(self, print_examples: int = 0) -> Dict[str, float]:
        """Run evaluation over the eval dataloader.

        Computes answer-token-only metrics: mean loss, per-token accuracy,
        exact-match accuracy (decoded prediction vs decoded label, requires a
        tokenizer) and optional top-k accuracy.  All counters are kept as CUDA
        tensors so they can be all-reduced across ranks.

        Args:
            print_examples: number of decoded (pred, label) pairs to log on rank 0.
        """
        self.model.eval()
        total_loss = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_batches = torch.tensor(0.0, device=self.device, dtype=torch.float32)

        total_tok_correct = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_tok_total = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_em_correct = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_em_total = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_topk_correct = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_topk_total = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        printed = 0

        for batch in self.eval_dataloader:
            batch = {k: v.to(self.device) for k, v in batch.items()}
            amp_enabled = self.config.fp16 or self.config.bf16
            amp_dtype = torch.float16 if self.config.fp16 else torch.bfloat16
            with torch.amp.autocast(device_type=self.device.type, enabled=amp_enabled, dtype=amp_dtype):
                outputs = self.model(
                    input_ids=batch["input_ids"],
                    attention_mask=batch["attention_mask"],
                    labels=batch["labels"],
                    return_pred_tokens=True,
                    topk=int(self.config.eval_topk) if self.config.eval_topk else 0,
                )

            # Skip non-finite losses so one bad batch cannot poison the average.
            if torch.isfinite(outputs["loss"]):
                total_loss += outputs["loss"].detach().float()
                total_batches += 1.0

            pred_ids = outputs.get("pred_ids", None)
            target_ids = outputs.get("target_ids", None)
            lengths = outputs.get("target_lengths", None)
            topk_correct = outputs.get("topk_correct", None)
            topk_total = outputs.get("topk_total", None)
            if topk_correct is not None and topk_total is not None:
                total_topk_correct += topk_correct.detach().float()
                total_topk_total += topk_total.detach().float()
            # Only score predictions when the padded matrices are consistent.
            if (
                pred_ids is not None
                and target_ids is not None
                and lengths is not None
                and pred_ids.ndim == 2
                and target_ids.ndim == 2
                and lengths.ndim == 1
                and pred_ids.shape == target_ids.shape
                and pred_ids.shape[0] == lengths.shape[0]
            ):
                pred_cpu = pred_ids.to("cpu", dtype=torch.long)
                tgt_cpu = target_ids.to("cpu", dtype=torch.long)
                len_cpu = lengths.to("cpu", dtype=torch.long)

                for i in range(int(len_cpu.shape[0])):
                    L = int(len_cpu[i].item())
                    if L <= 0:
                        continue
                    p = pred_cpu[i, :L]
                    t = tgt_cpu[i, :L]

                    total_tok_correct += torch.tensor(float((p == t).sum().item()), device=self.device, dtype=torch.float32)
                    total_tok_total += torch.tensor(float(L), device=self.device, dtype=torch.float32)

                    if self.tokenizer is not None:
                        # Exact match compares the decoded, stripped strings.
                        pred_text = self.tokenizer.decode(p.tolist(), skip_special_tokens=True).strip()
                        tgt_text = self.tokenizer.decode(t.tolist(), skip_special_tokens=True).strip()
                        em = float(pred_text == tgt_text)
                        total_em_correct += torch.tensor(em, device=self.device, dtype=torch.float32)
                        total_em_total += torch.tensor(1.0, device=self.device, dtype=torch.float32)

                        if self.is_main_process and printed < print_examples:
                            logger.info(f"[EVAL SAMPLE] pred={repr(pred_text)} | label={repr(tgt_text)} | match={bool(em)}")
                            printed += 1

        # Aggregate counters across ranks before computing the ratios.
        if self.is_distributed:
            dist.all_reduce(total_loss, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_batches, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_tok_correct, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_tok_total, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_em_correct, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_em_total, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_topk_correct, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_topk_total, op=dist.ReduceOp.SUM)

        avg_loss = (total_loss / total_batches.clamp(min=1.0)).item()
        tok_acc = (total_tok_correct / total_tok_total.clamp(min=1.0)).item()
        em_acc = (total_em_correct / total_em_total.clamp(min=1.0)).item()
        topk_acc = (total_topk_correct / total_topk_total.clamp(min=1.0)).item()
        if self.is_main_process:
            if self.config.debug_eval_stats:
                logger.info(
                    "[EVAL DEBUG] total_batches="
                    f"{float(total_batches.item()):.0f}, total_tok_total={float(total_tok_total.item()):.0f}, "
                    f"total_em_total={float(total_em_total.item()):.0f}, "
                    f"total_topk_total={float(total_topk_total.item()):.0f}"
                )
                if total_tok_total.item() == 0:
                    logger.warning("[EVAL DEBUG] No answer tokens found in eval set; acc will be 0.")
            logger.info(f"[EVAL METRIC] token_acc(answer-only) = {tok_acc * 100:.2f}%")
            logger.info(f"[EVAL METRIC] EM/acc(answer-only) = {em_acc * 100:.2f}%")
            if self.config.eval_topk and self.config.eval_topk > 0:
                logger.info(f"[EVAL METRIC] top{int(self.config.eval_topk)}_acc(answer-only) = {topk_acc * 100:.2f}%")
        return {"loss": avg_loss, "tok_acc": tok_acc, "em_acc": em_acc, "topk_acc": topk_acc}
1170
+
1171
def _append_eval_metrics(
    self,
    metrics: Dict[str, float],
    *,
    phase: str,
    epoch: Optional[int],
    train_avg_loss: Optional[float],
) -> None:
    """Append a single evaluation record to <output_dir>/eval_metrics.jsonl.

    Only the main process writes; all other ranks return immediately.
    Accuracy fields are stored as percentages.
    """
    if not self.is_main_process:
        return

    out_dir = self.config.output_dir
    os.makedirs(out_dir, exist_ok=True)

    entry = dict(
        phase=phase,
        epoch=epoch,
        global_step=int(self.global_step),
        train_avg_loss=None if train_avg_loss is None else float(train_avg_loss),
        eval_loss=float(metrics.get("loss", 0.0)),
        em_acc_pct=100.0 * float(metrics.get("em_acc", 0.0)),
        tok_acc_pct=100.0 * float(metrics.get("tok_acc", 0.0)),
    )

    # Append-mode JSONL so successive evals accumulate one record per line.
    with open(os.path.join(out_dir, "eval_metrics.jsonl"), "a") as fh:
        fh.write(json.dumps(entry) + "\n")
1194
+
1195
def save_final_checkpoint(self):
    """Persist final checkpoints under config.output_dir.

    Always writes a memory-only checkpoint (Titans neural-memory and
    memory-gate parameters); when config.save_full_checkpoint is set, also
    writes the full model state dict. Under FSDP, state dicts are gathered
    to rank 0 with CPU offload. Only the main process writes files, and all
    ranks synchronize on a barrier after each write.
    """
    ckpt_path = os.path.join(self.config.output_dir, self.config.final_ckpt_name)
    base_model = unwrap_model(self.model)
    # Collect only the trainable memory parameters by name substring.
    memory_sd = {
        name: p.detach().cpu()
        for name, p in base_model.named_parameters()
        if ("neural_memory" in name) or ("mem_gate" in name)
    }

    # NOTE(review): fallback path — presumably named_parameters() on an FSDP-wrapped
    # model can come back empty/flattened for these modules; gather the full
    # (rank0-only, CPU-offloaded) state dict and filter the memory entries instead.
    if is_fsdp_model(self.model) and len(memory_sd) == 0:
        from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
        full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
        with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, full_cfg):
            full_sd = self.model.state_dict()
        memory_sd = {k: v for k, v in full_sd.items() if ("neural_memory" in k) or ("mem_gate" in k)}

    if self.is_main_process:
        torch.save(
            {"memory_state_dict": memory_sd, "global_step": self.global_step, "config": asdict(self.config)},
            ckpt_path,
        )
        logger.info(f"Saved memory checkpoint: {ckpt_path}")
    if self.is_distributed:
        # Keep non-writing ranks from racing ahead of the rank-0 file write.
        dist.barrier()

    if self.config.save_full_checkpoint:
        full_ckpt_path = os.path.join(self.config.output_dir, self.config.final_full_ckpt_name)
        if is_fsdp_model(self.model):
            from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
            full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
            with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, full_cfg):
                full_sd = self.model.state_dict()
        else:
            full_sd = unwrap_model(self.model).state_dict()

        if self.is_main_process:
            torch.save(
                {"model_state_dict": full_sd, "global_step": self.global_step, "config": asdict(self.config)},
                full_ckpt_path,
            )
            logger.info(f"Saved full checkpoint: {full_ckpt_path}")
        if self.is_distributed:
            dist.barrier()
1238
+
1239
+
1240
def main():
    """CLI entry point: configure, build, and train/evaluate Qwen3 + Titans on BABILong.

    Steps:
      1. Parse CLI flags and overlay them onto a TrainingConfig.
      2. Initialize (optional) distributed training; resolve device/precision.
      3. Load tokenizer + Qwen base model and build the BABILong 90/10 split.
      4. Wrap the model (FSDP or DDP) and either run training, or — with
         --eval_only — load a checkpoint and report evaluation metrics.
    """
    from transformers import AutoModelForCausalLM, AutoTokenizer

    parser = argparse.ArgumentParser()
    parser.add_argument("--fsdp", action="store_true")
    parser.add_argument("--eval_only", action="store_true")
    parser.add_argument("--ckpt_path", type=str, default=None)
    parser.add_argument("--max_eval_samples", type=int, default=None)
    parser.add_argument("--max_samples", type=int, default=None)
    parser.add_argument("--max_length", type=int, default=None)
    parser.add_argument("--output_dir", type=str, default=None)
    parser.add_argument("--num_epochs", type=int, default=None)
    parser.add_argument("--eval_steps", type=int, default=None)
    parser.add_argument("--eval_topk", type=int, default=0)
    parser.add_argument("--batch_size", type=int, default=None)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=None)
    parser.add_argument("--chunk_size", type=int, default=None)
    parser.add_argument("--memory_layer_stride", type=int, default=None)
    parser.add_argument("--no_memory", action="store_true")
    parser.add_argument("--gradient_checkpointing", action="store_true")
    parser.add_argument("--no_chunkwise_backward", action="store_true")
    parser.add_argument("--log_every_batches", type=int, default=80)
    parser.add_argument("--label_prefix_tokens", type=int, default=0)
    parser.add_argument(
        "--no_detach_mem_state",
        action="store_true",
        help="Do not detach memory state across chunks (allows cross-chunk gradients)",
    )
    parser.add_argument("--debug_data_samples", type=int, default=0)
    parser.add_argument("--debug_label_batches", type=int, default=0)
    parser.add_argument("--debug_eval_stats", action="store_true")
    parser.add_argument("--debug_grad_norm", action="store_true")
    args = parser.parse_args()

    # Overlay CLI overrides onto the dataclass defaults.
    config = TrainingConfig()
    if args.fsdp:
        config.use_fsdp = True
    if args.no_memory:
        config.use_memory = False
    if args.max_samples is not None:
        config.max_samples = args.max_samples
    if args.max_length is not None:
        config.max_length = int(args.max_length)
    if args.output_dir is not None:
        config.output_dir = args.output_dir
    elif not config.use_memory:
        # Separate output dir for the no-memory ablation so runs don't mix.
        config.output_dir = "./outputs/qwen_babilong_no_memory"
    if args.num_epochs is not None:
        config.num_epochs = args.num_epochs
    if args.eval_steps is not None:
        config.eval_steps = args.eval_steps
    if args.eval_topk is not None:
        config.eval_topk = int(args.eval_topk)
    if args.batch_size is not None:
        config.batch_size = int(args.batch_size)
    if args.gradient_accumulation_steps is not None:
        config.gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    if args.chunk_size is not None:
        config.chunk_size = int(args.chunk_size)
    if args.memory_layer_stride is not None:
        config.memory_layer_stride = int(args.memory_layer_stride)
    if args.gradient_checkpointing:
        config.gradient_checkpointing = True
    if args.no_chunkwise_backward:
        config.chunkwise_backward = False
    if args.label_prefix_tokens is not None:
        config.label_prefix_tokens = int(args.label_prefix_tokens)
    if args.no_detach_mem_state:
        config.detach_mem_state = False  # allow memory gradients to propagate across chunks
    if args.log_every_batches is not None:
        config.log_every_batches = int(args.log_every_batches)
        # Convert a per-batch logging cadence into optimizer-step cadence.
        ga = max(int(config.gradient_accumulation_steps), 1)
        config.logging_steps = max(1, math.ceil(config.log_every_batches / ga))
    if args.debug_data_samples is not None:
        config.debug_data_samples = int(args.debug_data_samples)
    if args.debug_label_batches is not None:
        config.debug_label_batches = int(args.debug_label_batches)
    if args.debug_eval_stats:
        config.debug_eval_stats = True
    if args.debug_grad_norm:
        config.debug_grad_norm = True

    is_distributed, rank, local_rank, world_size = init_distributed()
    is_main = (rank == 0)

    # --- resolve incompatible feature combinations before building anything ---
    if config.use_fsdp and config.chunkwise_backward:
        if is_main:
            logger.warning("chunkwise_backward is incompatible with FSDP; disabling it.")
        config.chunkwise_backward = False

    if is_distributed and (not config.use_fsdp) and config.gradient_checkpointing:
        config.gradient_checkpointing = False
        if is_main:
            logger.warning("gradient_checkpointing is unstable with DDP here; disabling it.")

    if is_distributed and (not config.use_fsdp) and config.chunkwise_backward:
        if is_main:
            logger.info("DDP chunkwise backward enabled via per-chunk forward/backward.")

    if is_distributed and (not config.use_fsdp):
        if not config.ddp_find_unused_parameters:
            config.ddp_find_unused_parameters = True
            if is_main:
                logger.warning("Enabling DDP find_unused_parameters to avoid unused grad errors.")

    # Per-rank seed offset so data order differs across ranks deterministically.
    torch.manual_seed(config.seed + rank)

    if torch.cuda.is_available():
        device = torch.device(f"cuda:{local_rank}" if is_distributed else "cuda")
    else:
        device = torch.device("cpu")

    # bf16 requires hardware support; fall back to fp16 otherwise.
    if torch.cuda.is_available() and config.bf16:
        bf16_supported = False
        try:
            bf16_supported = torch.cuda.is_bf16_supported()
        except Exception:
            bf16_supported = False
        if not bf16_supported:
            if is_main:
                logger.warning("bf16 not supported on this GPU/runtime; falling back to fp16.")
            config.bf16 = False
            if not config.fp16:
                config.fp16 = True

    if torch.cuda.is_available() and getattr(config, "use_tf32", False):
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True
        try:
            torch.set_float32_matmul_precision("high")
        except Exception:
            pass

    if is_main:
        logger.info("=" * 60)
        logger.info("Qwen3-4B + Titans training (DDP/FSDP)")
        logger.info("=" * 60)
        logger.info(f"distributed={is_distributed}, world_size={world_size}, use_fsdp={config.use_fsdp}")
        logger.info(f"mode={'EVAL_ONLY' if args.eval_only else 'TRAIN'}")
        logger.info(f"model_path={config.model_path}")
        logger.info(f"data_path={config.data_path}")
        logger.info(f"output_dir={config.output_dir}")
        logger.info(f"max_samples={config.max_samples}")
        logger.info(f"max_length={config.max_length}")
        logger.info(f"chunk_size={config.chunk_size}")
        logger.info(f"use_memory={config.use_memory}")
        if config.use_memory:
            logger.info(f"memory_layer_stride={config.memory_layer_stride}")
        logger.info(f"chunkwise_backward={config.chunkwise_backward}")
        logger.info(f"label_prefix_tokens={config.label_prefix_tokens}")
        logger.info(f"detach_mem_state={config.detach_mem_state}")
        if config.eval_topk:
            logger.info(f"eval_topk={config.eval_topk}")

    tokenizer = AutoTokenizer.from_pretrained(config.model_path, trust_remote_code=True)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # disable flash-attn / torchao / torchvision detection
    # (monkey-patch transformers' availability probes so optional backends
    # with incompatible local versions are never imported)
    try:
        import transformers
        from transformers.utils import import_utils as _import_utils

        def _disabled(*args, **kwargs):
            return False

        _import_utils.is_flash_attn_2_available = _disabled
        if hasattr(transformers, "utils") and hasattr(transformers.utils, "is_flash_attn_2_available"):
            transformers.utils.is_flash_attn_2_available = _disabled

        _import_utils.is_torchao_available = _disabled
        if hasattr(transformers, "utils") and hasattr(transformers.utils, "is_torchao_available"):
            transformers.utils.is_torchao_available = _disabled

        _import_utils.is_torchvision_available = _disabled
        if hasattr(transformers, "utils") and hasattr(transformers.utils, "is_torchvision_available"):
            transformers.utils.is_torchvision_available = _disabled
    except Exception as e:
        logger.warning(f"Disable checks failed (ignored): {e}")

    torch_dtype = torch.bfloat16 if config.bf16 else (torch.float16 if config.fp16 else torch.float32)

    qwen_model = AutoModelForCausalLM.from_pretrained(
        config.model_path,
        torch_dtype=torch_dtype,
        device_map=None,
        trust_remote_code=True,
        attn_implementation="sdpa",
        low_cpu_mem_usage=True,
    )
    qwen_model.to(device)
    qwen_model.config.use_cache = False  # KV cache is incompatible with training
    if config.gradient_checkpointing and hasattr(qwen_model, "gradient_checkpointing_enable"):
        qwen_model.gradient_checkpointing_enable()

    train_dataset = BABILongDataset(
        config.data_path,
        tokenizer,
        max_length=config.max_length,
        answer_reserve_tokens=config.answer_reserve_tokens,
        label_prefix_tokens=config.label_prefix_tokens,
        max_samples=config.max_samples,
    )

    # Deterministic 90/10 train/eval split (fixed generator seed).
    train_size = int(0.9 * len(train_dataset))
    eval_size = len(train_dataset) - train_size
    train_dataset, eval_dataset = torch.utils.data.random_split(
        train_dataset,
        [train_size, eval_size],
        generator=torch.Generator().manual_seed(config.seed),
    )

    if is_main and config.debug_data_samples > 0:
        log_dataset_debug_stats(train_dataset, tokenizer, "train", config.debug_data_samples)
        log_dataset_debug_stats(eval_dataset, tokenizer, "eval", config.debug_data_samples)

    train_sampler = None
    eval_sampler = None
    if is_distributed:
        from torch.utils.data.distributed import DistributedSampler
        train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank, shuffle=True, seed=config.seed)
        eval_sampler = DistributedSampler(eval_dataset, num_replicas=world_size, rank=rank, shuffle=False)

    train_dataloader = DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=(train_sampler is None),  # sampler already shuffles when distributed
        sampler=train_sampler,
        collate_fn=collate_fn,
        num_workers=0,
    )
    eval_dataloader = DataLoader(
        eval_dataset,
        batch_size=config.batch_size,
        shuffle=False,
        sampler=eval_sampler,
        collate_fn=collate_fn,
        num_workers=0,
    )

    model = QwenTitansForBABILong(qwen_model, config)
    model.to(device)

    if is_distributed:
        if config.use_fsdp:
            from functools import partial
            from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, MixedPrecision
            from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
            from transformers.models.qwen3.modeling_qwen3 import Qwen3DecoderLayer

            mp_policy = MixedPrecision(param_dtype=torch_dtype, reduce_dtype=torch_dtype, buffer_dtype=torch_dtype)
            # Shard at decoder-layer granularity (both plain and memory-augmented layers).
            auto_wrap = partial(transformer_auto_wrap_policy, transformer_layer_cls={Qwen3DecoderLayer, QwenDecoderLayerWithTitansMemory})

            model = FSDP(
                model,
                auto_wrap_policy=auto_wrap,
                mixed_precision=mp_policy,
                device_id=torch.cuda.current_device(),
                use_orig_params=config.fsdp_use_orig_params,
                ignored_modules=model.get_memory_modules(),  # memory modules stay unsharded
            )
        else:
            model = DDP(
                model,
                device_ids=[local_rank],
                output_device=local_rank,
                find_unused_parameters=config.ddp_find_unused_parameters,
            )
            if config.gradient_checkpointing:
                try:
                    model._set_static_graph()
                    if is_main:
                        logger.warning("DDP static graph enabled for gradient checkpointing.")
                except Exception as e:
                    if is_main:
                        logger.warning(f"DDP static graph enable failed (ignored): {e}")

    trainer = Trainer(
        model=model,
        train_dataloader=train_dataloader,
        eval_dataloader=eval_dataloader,
        config=config,
        rank=rank,
        world_size=world_size,
        is_distributed=is_distributed,
        tokenizer=tokenizer,
    )

    if args.eval_only:
        ckpt_path = args.ckpt_path or os.path.join(config.output_dir, config.final_ckpt_name)
        if is_main:
            logger.info(f"eval_only: loading checkpoint: {ckpt_path}")
        ckpt = torch.load(ckpt_path, map_location="cpu")
        has_full = isinstance(ckpt, dict) and ("model_state_dict" in ckpt)
        if has_full:
            full_sd = ckpt["model_state_dict"]
            if is_fsdp_model(model):
                from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
                full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
                with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_cfg):
                    # rank0_only: non-main ranks load an empty dict; FSDP broadcasts.
                    sd_to_load = full_sd if is_main else {}
                    model.load_state_dict(sd_to_load, strict=False)
            else:
                unwrap_model(model).load_state_dict(full_sd, strict=False)

        # Memory weights may live in a dedicated key or be the checkpoint itself.
        memory_sd = ckpt.get("memory_state_dict", ckpt if isinstance(ckpt, dict) else {})
        memory_sd = {k: v for k, v in memory_sd.items() if ("neural_memory" in k) or ("mem_gate" in k)}
        if len(memory_sd) > 0:
            if is_fsdp_model(model):
                from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
                full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
                with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_cfg):
                    sd_to_load = memory_sd if is_main else {}
                    model.load_state_dict(sd_to_load, strict=False)
            else:
                unwrap_model(model).load_state_dict(memory_sd, strict=False)

        eval_metrics = trainer.evaluate()
        if is_main:
            # Clamp loss before exp() so perplexity cannot overflow.
            ppl = float(math.exp(min(20.0, eval_metrics["loss"])))
            logger.info(
                f"[EVAL] loss={eval_metrics['loss']:.4f}, ppl≈{ppl:.3f}, "
                f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
            )
        cleanup_distributed()
        return

    trainer.train()
    cleanup_distributed()


if __name__ == "__main__":
    main()
examples/train_qwen_titans_babilong_v3.py ADDED
@@ -0,0 +1,1683 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Qwen3 + Titans Deep Integration (v3) - BABILong QA1 (32k)
3
+
4
+ Key improvements over v1/v2:
5
+ 1. Deep Attention Integration: Memory participates in attention K/V computation
6
+ 2. Cross-chunk gradient flow: Support detach_mem_state=False for better long-term learning
7
+ 3. Memory-Augmented Attention: Memory as additional context source
8
+ 4. Dual-path architecture: Both attention-level and layer-level memory integration
9
+ 5. Better memory state management with optional gradient flow
10
+ """
11
+
12
+ import os
13
+ import sys
14
+
15
+ # =============================================================================
16
+ # CRITICAL: Disable torchao BEFORE importing transformers to avoid version conflicts
17
+ # =============================================================================
18
+ os.environ["TRANSFORMERS_NO_TORCHAO"] = "1"
19
+
20
+ # Mock torchao to prevent import errors
21
class _MockTorchAO:
    """Inert stand-in for the torchao package.

    Any attribute access or call — at any chain depth and with any
    signature — simply yields another mock, so code that probes torchao
    never raises.
    """

    def __getattr__(self, attr):
        # Missing attributes resolve to fresh mocks (chains like a.b.c work).
        return type(self)()

    def __call__(self, *call_args, **call_kwargs):
        # Calling a mock is a no-op that returns another mock.
        return type(self)()
26
+
27
+ sys.modules['torchao'] = _MockTorchAO()
28
+ sys.modules['torchao.quantization'] = _MockTorchAO()
29
+
30
+ import json
31
+ import math
32
+ import argparse
33
+ import logging
34
+ import weakref
35
+ from contextlib import nullcontext
36
+ from dataclasses import dataclass, asdict, field
37
+ from typing import Optional, Dict, Any, List, Tuple, Callable
38
+
39
+ import torch
40
+ import torch.nn as nn
41
+ import torch.nn.functional as F
42
+ import torch.distributed as dist
43
+ from torch.utils.data import Dataset, DataLoader
44
+ from torch.optim import AdamW
45
+ from torch.optim.lr_scheduler import CosineAnnealingLR
46
+ from torch.nn.parallel import DistributedDataParallel as DDP
47
+ from tqdm import tqdm
48
+
49
+ from einops import rearrange, repeat
50
+
51
+ # add repo root to sys.path
52
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
53
+
54
+ # Titans components
55
+ from titans_pytorch import NeuralMemory, MemoryMLP
56
+ from titans_pytorch.neural_memory import NeuralMemState
57
+
58
+ logging.basicConfig(
59
+ level=logging.INFO,
60
+ format="%(asctime)s - %(levelname)s - %(message)s"
61
+ )
62
+ logger = logging.getLogger(__name__)
63
+
64
+
65
+ # =============================================================================
66
+ # Configuration
67
+ # =============================================================================
68
+
69
@dataclass
class TrainingConfig:
    """Hyperparameters and paths for Qwen3 + Titans v3 training on BABILong QA1 (32k)."""

    # paths (machine-specific defaults; override via CLI flags)
    model_path: str = "/data/huangyifei/huggingface_cache/hub/models--Qwen--Qwen3-4B-Instruct-2507/snapshots/cdbee75f17c01a7cc42f958dc650907174af0554"
    data_path: str = "/data/yty/BABILong/babilong-train-5k-samples/data/qa1/32k.json"
    output_dir: str = "./outputs/qwen_titans_babilong_v3"

    # training
    num_epochs: int = 10  # aligned with v1/v2 runs
    batch_size: int = 1  # per-device; reduced for 32k sequence length
    gradient_accumulation_steps: int = 16  # keeps effective batch size comparable to v1/v2
    max_grad_norm: float = 1.0

    # learning rates (separate for different components)
    lr_memory: float = 1e-4
    lr_memory_attention: float = 5e-5  # NEW in v3: attention-integrated memory params
    lr_pretrained: float = 5e-6
    weight_decay: float = 0.01
    warmup_steps: int = 100

    # streaming / memory
    chunk_size: int = 4096  # sequence chunk length for streaming; reduced for memory efficiency
    use_memory: bool = True
    memory_chunk_size: int = 128
    memory_batch_size: int = 128
    memory_heads: int = 8
    memory_dim_head: int = 64
    memory_depth: int = 1  # same as v1/v2
    memory_layer_stride: int = 8  # insert a memory layer every N decoder layers (same as v1/v2)
    memory_fp32: bool = True

    # NEW: v3 specific options
    detach_mem_state: bool = True  # True = memory-efficient; False = cross-chunk gradient flow
    deep_memory_integration: bool = False  # disabled by default for memory efficiency
    memory_as_context: bool = False  # disabled by default for memory efficiency
    num_memory_tokens: int = 16  # number of virtual memory tokens in attention
    memory_gate_bias: float = -2.0  # initial memory-gate bias (conservative: gate starts near closed)
    use_momentum: bool = True
    momentum_order: int = 1

    # gradient flow control
    gradient_checkpoint_memory: bool = False  # checkpoint the memory computation
    cross_chunk_gradient_steps: int = 2  # allow gradients through up to N chunks back

    # evaluation / logging
    eval_steps: int = 200
    eval_topk: int = 0  # 0 disables top-k accuracy reporting
    logging_steps: int = 10
    log_every_batches: int = 80
    final_eval_print_examples: int = 10
    debug_data_samples: int = 0
    debug_label_batches: int = 0
    debug_eval_stats: bool = False
    debug_grad_norm: bool = False

    # precision
    bf16: bool = True
    fp16: bool = False
    use_tf32: bool = True
    gradient_checkpointing: bool = False
    chunkwise_backward: bool = True

    # data
    max_length: int = 32768
    answer_reserve_tokens: int = 64  # tokens kept free at the end of the prompt for the answer
    label_prefix_tokens: int = 0  # optionally also supervise this many prompt tokens before the answer
    max_samples: Optional[int] = 500  # same as v1/v2 experiments (450 train / 50 eval after split)

    # distributed
    use_fsdp: bool = False
    fsdp_use_orig_params: bool = True
    ddp_find_unused_parameters: bool = False

    # checkpoint
    save_full_checkpoint: bool = True
    final_ckpt_name: str = "final_memory_checkpoint.pt"
    final_full_ckpt_name: str = "final_full_checkpoint.pt"

    seed: int = 42
149
+
150
+ # =============================================================================
151
+ # Dataset
152
+ # =============================================================================
153
+
154
class BABILongDataset(Dataset):
    """BABILong QA samples tokenized as a long prompt followed by the answer.

    Each sample becomes: context + "Question: ..." + "Answer:" cue, then the
    answer tokens. Sequences are padded/truncated to a fixed ``max_length``.
    Labels are -100 everywhere except the answer span (and optionally the
    last ``label_prefix_tokens`` prompt tokens), so loss covers the answer only.
    """

    def __init__(
        self,
        data_path: str,
        tokenizer,
        max_length: int = 32768,
        answer_reserve_tokens: int = 64,
        label_prefix_tokens: int = 0,
        max_samples: Optional[int] = None,
    ):
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.answer_reserve_tokens = answer_reserve_tokens
        self.label_prefix_tokens = int(label_prefix_tokens)

        logger.info(f"Loading dataset: {data_path}")
        with open(data_path, "r") as f:
            records = json.load(f)
        # Optionally cap the number of samples (truthiness check, as upstream).
        self.data = records[:max_samples] if max_samples else records

        logger.info(f"Dataset size: {len(self.data)}")

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        sample = self.data[idx]
        prompt_text = f"{sample['input']}\n\nQuestion: {sample['question']}\nAnswer:"

        pad_id = self.tokenizer.pad_token_id or 0
        reserve = int(self.answer_reserve_tokens)

        # Truncate the prompt so at least `reserve` positions remain for the answer.
        prompt_ids = self.tokenizer(
            prompt_text,
            max_length=max(self.max_length - reserve, 1),
            truncation=True,
            add_special_tokens=True,
            return_tensors="pt",
        ).input_ids.squeeze(0)

        answer_ids = self.tokenizer(
            f" {sample['target']}",
            add_special_tokens=False,
            return_tensors="pt",
        ).input_ids.squeeze(0)

        # Clip the answer so prompt + answer never exceeds max_length.
        room = max(self.max_length - prompt_ids.numel(), 0)
        answer_ids = answer_ids[:room]

        input_ids = torch.cat([prompt_ids, answer_ids])[: self.max_length]
        seq_len = input_ids.numel()

        # Supervise only the answer span, plus an optional prompt-tail prefix.
        labels = input_ids.new_full((seq_len,), -100)
        if answer_ids.numel() > 0:
            ans_start = prompt_ids.numel()
            ans_end = min(ans_start + answer_ids.numel(), seq_len)
            labels[ans_start:ans_end] = input_ids[ans_start:ans_end]
            if self.label_prefix_tokens > 0:
                prefix = min(ans_start, self.label_prefix_tokens)
                if prefix > 0:
                    labels[ans_start - prefix:ans_start] = input_ids[ans_start - prefix:ans_start]

        # Right-pad to the fixed length; mask marks real (non-pad) positions.
        pad_len = self.max_length - seq_len
        if pad_len > 0:
            input_ids = F.pad(input_ids, (0, pad_len), value=int(pad_id))
            labels = F.pad(labels, (0, pad_len), value=-100)
        attention_mask = (torch.arange(self.max_length) < seq_len).long()

        return {
            "input_ids": input_ids.to(dtype=torch.long),
            "labels": labels.to(dtype=torch.long),
            "attention_mask": attention_mask,
        }
235
+
236
+
237
def collate_fn(batch):
    """Stack per-sample tensors into a batched dict, key by key."""
    stacked = {}
    for key in batch[0]:
        stacked[key] = torch.stack([sample[key] for sample in batch], dim=0)
    return stacked
240
+
241
+
242
+ # =============================================================================
243
+ # Memory-Augmented Attention Module (NEW in v3)
244
+ # =============================================================================
245
+
246
class MemoryAugmentedAttention(nn.Module):
    """Gated additive fusion of retrieved memory into attention hidden states.

    The memory context is passed through a small MLP, gated per attention
    head by a learned sigmoid gate, projected, and added residually to the
    hidden states. The output projection is zero-initialized, so the module
    contributes nothing at the start of training.
    """

    def __init__(
        self,
        hidden_size: int,
        num_attention_heads: int,
        num_memory_tokens: int = 16,
        memory_dim_head: int = 64,
        memory_fp32: bool = True,
        gate_bias: float = -2.0,
    ):
        # num_memory_tokens / memory_dim_head are accepted for config
        # compatibility but are not referenced in this module's body.
        super().__init__()
        self.hidden_size = hidden_size
        self.num_heads = num_attention_heads
        self.head_dim = hidden_size // num_attention_heads
        self.memory_fp32 = memory_fp32

        # Two-layer MLP that adapts the retrieved memory before fusion.
        self.memory_transform = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size),
        )

        # One learnable scalar gate per attention head (sigmoid-squashed in forward).
        self.memory_gate = nn.Parameter(torch.full((num_attention_heads, 1, 1), gate_bias))

        # Zero-initialized output projection: the memory path starts as a no-op.
        self.memory_output_proj = nn.Linear(hidden_size, hidden_size, bias=False)
        nn.init.zeros_(self.memory_output_proj.weight)

    def forward(
        self,
        hidden_states: torch.Tensor,
        memory_context: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Fuse memory into hidden states.

        Args:
            hidden_states: [batch, seq_len, hidden_size] current hidden states.
            memory_context: [batch, seq_len, hidden_size] retrieved memory.
            attention_mask: accepted for interface compatibility; unused here.
        Returns:
            [batch, seq_len, hidden_size] hidden states plus gated memory term.
        """
        bsz, seq_len, _ = hidden_states.shape

        transformed = self.memory_transform(memory_context)

        # Split into heads: (b, n, h*d) -> (b, h, n, d).
        per_head = transformed.view(bsz, seq_len, self.num_heads, -1).transpose(1, 2)

        # Per-head sigmoid gate, broadcast over sequence and head dims.
        gated = per_head * torch.sigmoid(self.memory_gate)

        # Merge heads back: (b, h, n, d) -> (b, n, h*d).
        merged = gated.transpose(1, 2).reshape(bsz, seq_len, self.hidden_size)

        # Residual add of the projected memory contribution.
        return hidden_states + self.memory_output_proj(merged)
313
+
314
+
315
+ # =============================================================================
316
+ # Deep Memory Layer (v3 - integrates memory at multiple levels)
317
+ # =============================================================================
318
+
319
class QwenDecoderLayerWithDeepMemory(nn.Module):
    """
    v3: Deep integration of Titans memory into a Qwen decoder layer.

    Wraps an existing decoder layer (`base_layer`) and post-processes its
    hidden states with a Titans `NeuralMemory` module.  Key differences
    from v1/v2:

    1. Memory participates in attention computation (not just post-processing)
    2. Support for cross-chunk gradient flow (see `_should_detach_state`)
    3. Dual-path architecture: attention-level + layer-level memory
    4. Better memory state management (explicit reset / chunk counter)

    The store mask that decides which positions are written into memory is
    published by the parent model on `parent_model._mem_store_mask`
    (see `_get_store_mask`), accessed via a weakref to avoid a reference
    cycle between the wrapper and the parent model.
    """
    def __init__(
        self,
        base_layer: nn.Module,
        layer_idx: int,
        *,
        hidden_size: int,
        num_attention_heads: int,
        chunk_size: int,
        batch_size: int,
        dim_head: int,
        num_heads: int,
        memory_depth: int,
        memory_fp32: bool,
        detach_mem_state: bool,
        deep_integration: bool,
        memory_as_context: bool,
        num_memory_tokens: int,
        memory_gate_bias: float,
        use_momentum: bool,
        momentum_order: int,
        parent_model: Optional[nn.Module] = None,
    ):
        super().__init__()
        self.layer = base_layer
        self.layer_idx = layer_idx
        self.memory_fp32 = memory_fp32
        self.detach_mem_state = bool(detach_mem_state)
        self.deep_integration = deep_integration
        self.memory_as_context = memory_as_context
        # Recurrent memory state carried across chunks of one sequence.
        self.memory_state: Optional[NeuralMemState] = None
        # Weakref so this sub-module does not keep the parent model alive.
        self.parent_model_ref = weakref.ref(parent_model) if parent_model is not None else None

        # Chunk counter for gradient flow control
        self._chunk_counter = 0
        self._gradient_steps_back = 2  # Allow gradient through 2 chunks

        # Core Neural Memory module
        memory_model = MemoryMLP(
            dim=dim_head,
            depth=memory_depth,
            expansion_factor=2.0,
        )

        self.neural_memory = NeuralMemory(
            dim=hidden_size,
            chunk_size=chunk_size,
            batch_size=batch_size,
            dim_head=dim_head,
            heads=num_heads,
            model=memory_model,
            momentum=use_momentum,
            momentum_order=momentum_order,
            qk_rmsnorm=True,
            pre_rmsnorm=True,
            default_step_transform_max_lr=1e-2,
            init_adaptive_step_bias=-4.0,  # Slightly higher than v1/v2 for faster adaptation
            max_grad_norm=1.0,
            spectral_norm_surprises=True,
            use_accelerated_scan=False,
        )

        # Layer-level memory gate (similar to v1/v2): takes [hidden; retrieved]
        # and emits a per-channel sigmoid gate in (0, 1).
        self.mem_gate = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.Sigmoid(),
        )
        # Initialize gate to be conservative: zero weights + bias makes the
        # initial gate a constant sigmoid(memory_gate_bias).
        nn.init.zeros_(self.mem_gate[-2].weight)
        nn.init.constant_(self.mem_gate[-2].bias, memory_gate_bias)

        # NEW: Deep attention integration
        if deep_integration:
            self.memory_attention = MemoryAugmentedAttention(
                hidden_size=hidden_size,
                num_attention_heads=num_attention_heads,
                num_memory_tokens=num_memory_tokens,
                memory_dim_head=dim_head,
                memory_fp32=memory_fp32,
                gate_bias=memory_gate_bias,
            )
        else:
            self.memory_attention = None

        # NEW: Pre-attention memory projection (for memory-as-context).
        # Last linear is zero-initialized so the path starts as identity.
        if memory_as_context:
            self.memory_context_proj = nn.Sequential(
                nn.Linear(hidden_size, hidden_size),
                nn.SiLU(),
                nn.Linear(hidden_size, hidden_size),
            )
            nn.init.zeros_(self.memory_context_proj[-1].weight)
            nn.init.zeros_(self.memory_context_proj[-1].bias)
        else:
            self.memory_context_proj = None

        # Move to appropriate device/dtype (mirrors the wrapped base layer;
        # the neural memory may stay in fp32 for numerical stability).
        try:
            layer_device = next(base_layer.parameters()).device
            layer_dtype = next(base_layer.parameters()).dtype
        except StopIteration:
            # Base layer has no parameters; leave modules on default device.
            layer_device = None
            layer_dtype = None

        if layer_device is not None:
            mem_dtype = torch.float32 if memory_fp32 else layer_dtype
            self.neural_memory = self.neural_memory.to(device=layer_device, dtype=mem_dtype)
            if layer_dtype is not None:
                self.mem_gate = self.mem_gate.to(device=layer_device, dtype=layer_dtype)
            if self.memory_attention is not None:
                self.memory_attention = self.memory_attention.to(device=layer_device, dtype=layer_dtype)
            if self.memory_context_proj is not None:
                self.memory_context_proj = self.memory_context_proj.to(device=layer_device, dtype=layer_dtype)

    def reset_memory_state(self):
        """Drop the recurrent memory state and restart the chunk counter."""
        self.memory_state = None
        self._chunk_counter = 0

    def set_gradient_steps_back(self, steps: int):
        """Control how many chunks back gradient can flow."""
        self._gradient_steps_back = steps

    def _get_store_mask(self, hidden_states: torch.Tensor) -> Optional[torch.Tensor]:
        """Fetch the parent model's `_mem_store_mask` ([batch, seq] bool).

        Returns None when no parent is set, no mask was published, or the
        mask shape does not match the current hidden states.
        """
        parent_model = self.parent_model_ref() if self.parent_model_ref is not None else None
        if parent_model is None or not hasattr(parent_model, "_mem_store_mask"):
            return None
        store_mask = getattr(parent_model, "_mem_store_mask")
        if store_mask is None:
            return None
        store_mask = store_mask.to(device=hidden_states.device).bool()
        if store_mask.shape[:2] != hidden_states.shape[:2]:
            return None
        return store_mask

    def _should_detach_state(self) -> bool:
        """Determine if memory state should be detached based on chunk counter.

        When `detach_mem_state` is set, always detach.  Otherwise gradient is
        allowed through the first `_gradient_steps_back` chunks after a reset;
        NOTE(review): this gates by chunks-since-reset, not a sliding window
        of *recent* chunks — confirm that is the intended policy.
        """
        if self.detach_mem_state:
            return True
        # Allow gradient flow through recent chunks
        self._chunk_counter += 1
        return self._chunk_counter > self._gradient_steps_back

    def forward(self, *args, **kwargs):
        """Run the wrapped layer, then update/query neural memory and fuse it.

        Accepts/returns whatever the wrapped decoder layer does: either a
        bare hidden-states tensor or a tuple whose first element is the
        hidden states (remaining elements are passed through unchanged).
        """
        # Get original layer output
        outputs = self.layer(*args, **kwargs)

        if isinstance(outputs, (tuple, list)):
            hidden_states = outputs[0]
            rest = outputs[1:]
        else:
            hidden_states = outputs
            rest = None

        # Get store mask
        full_store_mask = self._get_store_mask(hidden_states)

        # Prepare memory input (optionally promoted to fp32)
        mem_inp = hidden_states.float() if self.memory_fp32 else hidden_states

        # Prepare store sequence and mask
        store_seq = None
        store_mask = full_store_mask
        if store_mask is not None:
            store_seq = mem_inp
            # Skip first token if not the first chunk (its mask bit was
            # cleared by the parent's `_set_mem_store_mask`).
            if store_mask.shape[1] > 0 and not store_mask[:, 0].any():
                store_seq = store_seq[:, 1:]
                store_mask = store_mask[:, 1:]

            # Align to chunk size: NeuralMemory stores in fixed-size chunks,
            # so drop the trailing remainder.
            store_chunk = self.neural_memory.store_chunk_size
            remainder = store_seq.shape[1] % store_chunk
            if remainder != 0:
                store_seq = store_seq[:, :-remainder]
                store_mask = store_mask[:, :-remainder]

            # Defensive re-alignment in case the trims above diverged.
            if store_mask is not None and store_seq is not None:
                if store_mask.shape[1] != store_seq.shape[1]:
                    min_len = min(store_mask.shape[1], store_seq.shape[1])
                    store_seq = store_seq[:, :min_len]
                    store_mask = store_mask[:, :min_len]

            if store_seq.shape[1] == 0:
                store_seq = None
                store_mask = None

        # Memory computation context: disable autocast when memory runs fp32.
        mem_ctx = (
            torch.amp.autocast(device_type=hidden_states.device.type, enabled=False)
            if self.memory_fp32
            else nullcontext()
        )

        # Determine if we should detach memory state
        should_detach = self._should_detach_state()

        with mem_ctx:
            retrieved, next_state = self.neural_memory(
                mem_inp,
                store_seq=store_seq,
                state=self.memory_state,
                store_mask=store_mask,
                detach_mem_state=should_detach,
            )
        self.memory_state = next_state

        if retrieved is not None:
            retrieved = retrieved.to(dtype=hidden_states.dtype)

            # Apply store mask to retrieved memory (zero out positions that
            # are not supposed to interact with memory)
            if full_store_mask is not None and full_store_mask.shape[:2] == retrieved.shape[:2]:
                retrieved = retrieved * full_store_mask.unsqueeze(-1).to(dtype=retrieved.dtype)

            # ===== v3 Deep Integration =====

            # Path 1: Memory-augmented attention (if enabled)
            if self.memory_attention is not None:
                hidden_states = self.memory_attention(
                    hidden_states=hidden_states,
                    memory_context=retrieved,
                    attention_mask=None,  # Could pass attention mask if needed
                )

            # Path 2: Memory as context projection (if enabled)
            if self.memory_context_proj is not None:
                context_enhancement = self.memory_context_proj(retrieved)
                hidden_states = hidden_states + context_enhancement

            # Path 3: Layer-level gated fusion (always active)
            gate = self.mem_gate(torch.cat([hidden_states, retrieved], dim=-1))
            hidden_states = hidden_states + gate * retrieved

        if rest is None:
            return hidden_states
        return (hidden_states, *rest)
565
+
566
+
567
+ # =============================================================================
568
+ # Main Model Wrapper
569
+ # =============================================================================
570
+
571
class QwenTitansForBABILongV3(nn.Module):
    """
    v3: Qwen3 with deep Titans memory integration.

    Replaces every `memory_layer_stride`-th decoder layer of the wrapped
    Qwen causal-LM with `QwenDecoderLayerWithDeepMemory` (in-place), and
    processes sequences chunk by chunk so the neural memory state is carried
    across chunks.  Loss is computed per chunk against the shifted labels
    and aggregated as a token-weighted mean over the whole sequence.
    """
    def __init__(self, qwen_model, config: TrainingConfig):
        super().__init__()
        self.qwen = qwen_model
        self.config = config
        self.hidden_size = qwen_model.config.hidden_size
        self.num_attention_heads = qwen_model.config.num_attention_heads
        self.use_memory = bool(getattr(config, "use_memory", True))

        if self.use_memory:
            self.memory_layer_stride = int(getattr(config, "memory_layer_stride", 6))
            # Layers 0, stride, 2*stride, ... get a memory wrapper.
            self.memory_layer_indices = [
                idx for idx in range(len(self.qwen.model.layers))
                if idx % self.memory_layer_stride == 0
            ]

            for layer_idx in self.memory_layer_indices:
                base_layer = self.qwen.model.layers[layer_idx]
                wrapped = QwenDecoderLayerWithDeepMemory(
                    base_layer,
                    layer_idx=layer_idx,
                    hidden_size=self.hidden_size,
                    num_attention_heads=self.num_attention_heads,
                    chunk_size=config.memory_chunk_size,
                    batch_size=config.memory_batch_size,
                    dim_head=config.memory_dim_head,
                    num_heads=config.memory_heads,
                    memory_depth=config.memory_depth,
                    memory_fp32=config.memory_fp32,
                    detach_mem_state=config.detach_mem_state,
                    deep_integration=config.deep_memory_integration,
                    memory_as_context=config.memory_as_context,
                    num_memory_tokens=config.num_memory_tokens,
                    memory_gate_bias=config.memory_gate_bias,
                    use_momentum=config.use_momentum,
                    momentum_order=config.momentum_order,
                    parent_model=self.qwen.model,
                )
                self.qwen.model.layers[layer_idx] = wrapped
        else:
            self.memory_layer_stride = 0
            self.memory_layer_indices = []

        if self.use_memory:
            logger.info("[QwenTitansForBABILongV3] Initialized with DEEP memory integration")
            logger.info(f"  - hidden_size: {self.hidden_size}")
            logger.info(f"  - num_attention_heads: {self.num_attention_heads}")
            logger.info(f"  - chunk_size: {config.chunk_size}")
            logger.info(f"  - memory_layer_stride: {self.memory_layer_stride}")
            logger.info(f"  - memory_layers: {self.memory_layer_indices}")
            logger.info(f"  - deep_memory_integration: {config.deep_memory_integration}")
            logger.info(f"  - memory_as_context: {config.memory_as_context}")
            logger.info(f"  - detach_mem_state: {config.detach_mem_state}")
            logger.info(f"  - cross_chunk_gradient_steps: {config.cross_chunk_gradient_steps}")
        else:
            logger.info("[QwenTitansForBABILongV3] Initialized (memory disabled)")

        # Collect the wrapped layers for direct state management.
        self._memory_layers = [
            layer for layer in self.qwen.model.layers
            if isinstance(layer, QwenDecoderLayerWithDeepMemory)
        ]
        # Side channel used by the wrapped layers (read via weakref).
        self.qwen.model._mem_store_mask = None

        # Set gradient steps for cross-chunk gradient flow
        for layer in self._memory_layers:
            layer.set_gradient_steps_back(config.cross_chunk_gradient_steps)

    def _split_into_chunks(self, tensor, chunk_size):
        """Return [(start, end, tensor[:, start:end]), ...] covering dim 1."""
        seq_len = tensor.shape[1]
        chunks = []
        for start in range(0, seq_len, chunk_size):
            end = min(start + chunk_size, seq_len)
            chunks.append((start, end, tensor[:, start:end]))
        return chunks

    def reset_memory_states(self):
        """Clear the recurrent memory state of every wrapped layer."""
        for layer in self._memory_layers:
            layer.reset_memory_state()

    def _set_mem_store_mask(
        self,
        chunk_ids: torch.Tensor,
        chunk_mask: Optional[torch.Tensor],
        chunk_start: int,
    ) -> None:
        """Publish the per-chunk store mask on `self.qwen.model`.

        For non-initial chunks the first position is excluded because it is
        the overlap token re-fed from the previous chunk (see the
        `proc_start = max(0, start - 1)` logic in `forward`).
        """
        if not self.use_memory:
            self.qwen.model._mem_store_mask = None
            return
        if chunk_mask is None:
            if chunk_start > 0:
                store_mask = torch.ones_like(chunk_ids, dtype=torch.bool)
                store_mask[:, 0] = False
            else:
                store_mask = None
        else:
            store_mask = chunk_mask.to(device=chunk_ids.device).bool()
            if chunk_start > 0:
                store_mask[:, 0] = False
        self.qwen.model._mem_store_mask = store_mask

    def get_memory_modules(self) -> List[nn.Module]:
        """Return all memory-related sub-modules (for freezing / param groups)."""
        if not self._memory_layers:
            return []
        modules = []
        for layer in self._memory_layers:
            modules.append(layer.neural_memory)
            modules.append(layer.mem_gate)
            if layer.memory_attention is not None:
                modules.append(layer.memory_attention)
            if layer.memory_context_proj is not None:
                modules.append(layer.memory_context_proj)
        return modules

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        return_pred_tokens: bool = False,
        topk: int = 0,
        chunk_start: Optional[int] = None,
        chunk_end: Optional[int] = None,
        reset_mem_state: bool = False,
    ) -> Dict[str, torch.Tensor]:
        """Chunked forward pass.

        Two modes:
          * `chunk_start`/`chunk_end` given: process exactly one chunk
            (used by chunkwise backward in the Trainer) and return
            {"loss_sum", "loss_tokens", "has_grad"}.
          * otherwise: iterate all chunks with fresh memory state and return
            {"loss"} plus, optionally, prediction/top-k bookkeeping tensors.
        """
        # Single chunk forward (for chunkwise backward)
        if chunk_start is not None or chunk_end is not None:
            start = 0 if chunk_start is None else int(chunk_start)
            end = int(chunk_end) if chunk_end is not None else None
            return self._forward_single_chunk(
                input_ids=input_ids,
                attention_mask=attention_mask,
                labels=labels,
                chunk_start=start,
                chunk_end=end,
                reset_mem_state=reset_mem_state,
            )

        # Full sequence forward
        batch_size, seq_len = input_ids.shape
        chunk_size = self.config.chunk_size
        chunks = self._split_into_chunks(input_ids, chunk_size)

        self.reset_memory_states()
        # Sum-reduction so chunks can be combined into an exact token mean.
        loss_fct_sum = nn.CrossEntropyLoss(reduction="sum")
        total_loss_sum = None
        total_loss_tokens = 0
        topk_correct = None
        topk_total = None

        pred_tokens_by_sample: List[List[int]] = [[] for _ in range(batch_size)]
        target_tokens_by_sample: List[List[int]] = [[] for _ in range(batch_size)]

        if topk and topk > 0:
            device = input_ids.device
            topk_correct = torch.tensor(0.0, device=device, dtype=torch.float32)
            topk_total = torch.tensor(0.0, device=device, dtype=torch.float32)

        for start, end, _ in chunks:
            # Include one token of left overlap so next-token shifting does
            # not lose the boundary prediction.
            proc_start = max(0, start - 1)
            chunk_ids = input_ids[:, proc_start:end]
            chunk_labels = labels[:, proc_start:end] if labels is not None else None
            chunk_mask = attention_mask[:, proc_start:end] if attention_mask is not None else None

            self._set_mem_store_mask(chunk_ids, chunk_mask, start)
            hidden_full = self._process_chunk(chunk_ids, chunk_mask)
            if self.use_memory:
                self.qwen.model._mem_store_mask = None

            if chunk_labels is not None and (chunk_labels != -100).any():
                chunk_labels_local = chunk_labels.to(device=hidden_full.device)
                # Standard causal shift: hidden[t] predicts label[t + 1].
                shift_hidden = hidden_full[:, :-1, :].contiguous()
                shift_labels = chunk_labels_local[:, 1:].contiguous()

                valid = shift_labels != -100
                if valid.any():
                    hs = shift_hidden[valid]
                    targets = shift_labels[valid]

                    # nan_to_num before/after the lm_head guards against AMP
                    # overflow poisoning the loss.
                    hs = torch.nan_to_num(hs.float(), nan=0.0, posinf=0.0, neginf=0.0)
                    logits = self.qwen.lm_head(hs)
                    logits = logits.float()
                    logits = torch.nan_to_num(logits, nan=0.0, posinf=0.0, neginf=0.0)
                    targets = targets.to(device=logits.device)

                    chunk_loss_sum = loss_fct_sum(logits, targets)
                    if total_loss_sum is None:
                        total_loss_sum = chunk_loss_sum
                    else:
                        total_loss_sum = total_loss_sum + chunk_loss_sum
                    total_loss_tokens += targets.numel()

                    if topk and topk > 0:
                        k = min(int(topk), logits.shape[-1])
                        topk_ids = torch.topk(logits, k=k, dim=-1).indices
                        correct = (topk_ids == targets.unsqueeze(-1)).any(dim=-1)
                        topk_correct = topk_correct + correct.float().sum()
                        topk_total = topk_total + torch.tensor(float(targets.numel()), device=topk_total.device)

                    if return_pred_tokens:
                        # Map flat valid positions back to their batch row.
                        idx = valid.nonzero(as_tuple=False)
                        pred_flat = torch.argmax(logits, dim=-1).detach().to("cpu", dtype=torch.long).tolist()
                        tgt_flat = targets.detach().to("cpu", dtype=torch.long).tolist()
                        b_idx_flat = idx[:, 0].detach().to("cpu", dtype=torch.long).tolist()

                        for i, b_idx in enumerate(b_idx_flat):
                            pred_tokens_by_sample[b_idx].append(int(pred_flat[i]))
                            target_tokens_by_sample[b_idx].append(int(tgt_flat[i]))

        if total_loss_sum is None or total_loss_tokens == 0:
            # No supervised tokens anywhere: return a zero loss on-device.
            device = next(self.qwen.parameters()).device
            loss = torch.zeros((), device=device, dtype=torch.float32)
        else:
            loss = total_loss_sum / total_loss_tokens

        out: Dict[str, torch.Tensor] = {"loss": loss}
        if return_pred_tokens:
            # Pack ragged per-sample token lists into -1-padded matrices.
            lengths = torch.tensor([len(x) for x in target_tokens_by_sample], dtype=torch.long)
            max_len = int(lengths.max().item()) if lengths.numel() > 0 else 0
            if max_len > 0:
                pred_mat = torch.full((batch_size, max_len), -1, dtype=torch.long)
                tgt_mat = torch.full((batch_size, max_len), -1, dtype=torch.long)
                for b in range(batch_size):
                    L = int(lengths[b].item())
                    if L > 0:
                        pred_mat[b, :L] = torch.tensor(pred_tokens_by_sample[b], dtype=torch.long)
                        tgt_mat[b, :L] = torch.tensor(target_tokens_by_sample[b], dtype=torch.long)
            else:
                pred_mat = torch.empty((batch_size, 0), dtype=torch.long)
                tgt_mat = torch.empty((batch_size, 0), dtype=torch.long)
            out["pred_ids"] = pred_mat
            out["target_ids"] = tgt_mat
            out["target_lengths"] = lengths
        if topk and topk > 0 and topk_correct is not None and topk_total is not None:
            out["topk_correct"] = topk_correct
            out["topk_total"] = topk_total
        return out

    def _forward_single_chunk(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor],
        labels: Optional[torch.Tensor],
        chunk_start: int,
        chunk_end: Optional[int],
        reset_mem_state: bool,
    ) -> Dict[str, torch.Tensor]:
        """Process one chunk of the sequence; caller drives the chunk loop.

        Returns the *sum* of token losses and the token count so the caller
        can normalize across chunks, plus a graph-connected zero when the
        chunk has no supervised tokens (keeps backward() well-defined).
        """
        if reset_mem_state:
            self.reset_memory_states()

        seq_len = input_ids.shape[1]
        end = chunk_end if chunk_end is not None else min(chunk_start + self.config.chunk_size, seq_len)
        end = min(int(end), seq_len)
        start = max(0, int(chunk_start))

        # One token of left overlap, same as the full-sequence path.
        proc_start = max(0, start - 1)
        chunk_ids = input_ids[:, proc_start:end]
        chunk_labels = labels[:, proc_start:end] if labels is not None else None
        chunk_mask = attention_mask[:, proc_start:end] if attention_mask is not None else None

        self._set_mem_store_mask(chunk_ids, chunk_mask, start)
        hidden_full = self._process_chunk(chunk_ids, chunk_mask)
        if self.use_memory:
            self.qwen.model._mem_store_mask = None

        loss_fct_sum = nn.CrossEntropyLoss(reduction="sum")
        total_loss_sum = None
        total_loss_tokens = 0

        if chunk_labels is not None and (chunk_labels != -100).any():
            chunk_labels_local = chunk_labels.to(device=hidden_full.device)
            shift_hidden = hidden_full[:, :-1, :].contiguous()
            shift_labels = chunk_labels_local[:, 1:].contiguous()

            valid = shift_labels != -100
            if valid.any():
                hs = shift_hidden[valid]
                targets = shift_labels[valid]

                hs = torch.nan_to_num(hs.float(), nan=0.0, posinf=0.0, neginf=0.0)
                logits = self.qwen.lm_head(hs)
                logits = logits.float()
                logits = torch.nan_to_num(logits, nan=0.0, posinf=0.0, neginf=0.0)
                targets = targets.to(device=logits.device)

                total_loss_sum = loss_fct_sum(logits, targets)
                total_loss_tokens = targets.numel()

        if total_loss_sum is None:
            # Zero that still depends on hidden_full, so backward() works.
            total_loss_sum = (hidden_full.float().sum() * 0.0)

        return {
            "loss_sum": total_loss_sum,
            "loss_tokens": total_loss_tokens,
            "has_grad": True,
        }

    def _process_chunk(
        self,
        chunk_ids: torch.Tensor,
        chunk_attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Embed chunk tokens and run the (memory-wrapped) decoder stack.

        Returns the last hidden state [batch, chunk_len, hidden_size];
        logits are computed lazily by the callers only on supervised tokens.
        """
        if hasattr(self.qwen.model, "embed_tokens"):
            token_embeds = self.qwen.model.embed_tokens(chunk_ids)
        else:
            token_embeds = self.qwen.get_input_embeddings()(chunk_ids)

        outputs = self.qwen.model(
            inputs_embeds=token_embeds,
            attention_mask=chunk_attention_mask,
            use_cache=False,
            output_hidden_states=False,
            return_dict=True,
        )
        return outputs.last_hidden_state

    def get_param_groups(self, config: TrainingConfig):
        """
        v3: Three parameter groups with different learning rates.

        - "memory_core": NeuralMemory + layer gate  -> config.lr_memory
        - "memory_attention": attention/context paths -> config.lr_memory_attention
        - "pretrained": everything else (Qwen weights) -> config.lr_pretrained
        Frozen parameters are skipped entirely.
        """
        memory_core_params = []       # Neural memory core
        memory_attention_params = []  # Memory-augmented attention
        pretrained_params = []

        for name, param in self.named_parameters():
            if not param.requires_grad:
                continue
            if "neural_memory" in name or "mem_gate" in name:
                memory_core_params.append(param)
            elif "memory_attention" in name or "memory_context_proj" in name:
                memory_attention_params.append(param)
            else:
                pretrained_params.append(param)

        param_groups = []
        if len(memory_core_params) > 0:
            param_groups.append({
                "params": memory_core_params,
                "lr": config.lr_memory,
                "weight_decay": config.weight_decay,
                "name": "memory_core"
            })
        if len(memory_attention_params) > 0:
            param_groups.append({
                "params": memory_attention_params,
                "lr": config.lr_memory_attention,
                "weight_decay": config.weight_decay,
                "name": "memory_attention"
            })
        if len(pretrained_params) > 0:
            param_groups.append({
                "params": pretrained_params,
                "lr": config.lr_pretrained,
                "weight_decay": config.weight_decay,
                "name": "pretrained"
            })

        logger.info(f"Param groups: memory_core={len(memory_core_params)}, "
                    f"memory_attention={len(memory_attention_params)}, "
                    f"pretrained={len(pretrained_params)}")
        return param_groups
935
+
936
+
937
+ # =============================================================================
938
+ # Distributed Training Utilities
939
+ # =============================================================================
940
+
941
def init_distributed() -> tuple:
    """Initialise torch.distributed from torchrun-provided environment variables.

    Returns:
        Tuple ``(is_distributed, rank, local_rank, world_size)``.  When the
        ``RANK`` / ``WORLD_SIZE`` variables are absent (script not launched
        via torchrun), falls back to a single-process configuration.

    Raises:
        RuntimeError: if distributed env vars are set but torch.distributed
            is not available in this build.
    """
    env = os.environ
    if "RANK" not in env or "WORLD_SIZE" not in env:
        # Not a torchrun launch: single process, rank 0.
        return False, 0, 0, 1

    rank = int(env["RANK"])
    world_size = int(env["WORLD_SIZE"])
    local_rank = int(env.get("LOCAL_RANK", 0))

    if not dist.is_available():
        raise RuntimeError("torch.distributed not available")

    # Idempotent: only create the process group once per process.
    if not dist.is_initialized():
        dist.init_process_group(backend="nccl", init_method="env://")

    torch.cuda.set_device(local_rank)
    return True, rank, local_rank, world_size
957
+
958
+
959
def cleanup_distributed():
    """Synchronise all ranks, then tear down the process group (no-op otherwise)."""
    if not (dist.is_available() and dist.is_initialized()):
        return
    dist.barrier()
    dist.destroy_process_group()
963
+
964
+
965
def unwrap_model(model: nn.Module) -> nn.Module:
    """Peel DDP/FSDP wrappers off `model`, returning the inner module.

    Handles three shapes: a DDP-style wrapper exposing ``.module``, an FSDP
    wrapper exposing ``._fsdp_wrapped_module`` whose inner object itself has
    a ``.module``, and an already-unwrapped model (returned unchanged).
    """
    if hasattr(model, "module"):
        # DistributedDataParallel-style wrapper.
        return model.module
    inner = getattr(model, "_fsdp_wrapped_module", None)
    if inner is not None and hasattr(inner, "module"):
        # FSDP wrapping a DDP-style module.
        return inner.module
    return model
973
+
974
+
975
def is_fsdp_model(model: nn.Module) -> bool:
    """Return True when `model` is an FSDP instance, False otherwise.

    Also returns False when FSDP cannot be imported (older / CPU-only builds).
    """
    try:
        from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
    except Exception:
        return False
    return isinstance(model, FSDP)
981
+
982
+
983
+ # =============================================================================
984
+ # Trainer
985
+ # =============================================================================
986
+
987
+ class Trainer:
988
+ def __init__(
989
+ self,
990
+ model: QwenTitansForBABILongV3,
991
+ train_dataloader: DataLoader,
992
+ eval_dataloader: DataLoader,
993
+ config: TrainingConfig,
994
+ rank: int = 0,
995
+ world_size: int = 1,
996
+ is_distributed: bool = False,
997
+ tokenizer=None,
998
+ ):
999
+ self.model = model
1000
+ self.train_dataloader = train_dataloader
1001
+ self.eval_dataloader = eval_dataloader
1002
+ self.config = config
1003
+ self.device = next(model.parameters()).device
1004
+ self.rank = rank
1005
+ self.world_size = world_size
1006
+ self.is_distributed = is_distributed
1007
+ self.is_main_process = (rank == 0)
1008
+ self.tokenizer = tokenizer
1009
+
1010
+ base_model = unwrap_model(self.model)
1011
+ param_groups = base_model.get_param_groups(config)
1012
+ self.optimizer = AdamW(param_groups)
1013
+
1014
+ total_steps = math.ceil(
1015
+ (len(train_dataloader) * config.num_epochs) / max(config.gradient_accumulation_steps, 1)
1016
+ )
1017
+ self.scheduler = CosineAnnealingLR(self.optimizer, T_max=total_steps, eta_min=1e-7)
1018
+
1019
+ self.scaler = torch.cuda.amp.GradScaler(enabled=config.fp16)
1020
+ self.global_step = 0
1021
+
1022
+ def _get_group_lr(self, group_name: str) -> Optional[float]:
1023
+ for group in self.optimizer.param_groups:
1024
+ if group.get("name") == group_name:
1025
+ return group.get("lr")
1026
+ return None
1027
+
1028
    def train(self):
        """Main training loop.

        Per batch, either:
          * chunkwise backward (`config.chunkwise_backward`): the sequence is
            split into `config.chunk_size` chunks; each chunk is forwarded and
            back-propagated separately (memory state carried across chunks,
            reset on the first chunk), with DDP gradient sync suppressed via
            `no_sync` until the last chunk of an accumulation boundary; or
          * a single full-sequence forward/backward.

        Gradient accumulation, clipping, fp16 scaling, LR scheduling, periodic
        logging/eval and per-epoch eval-metric appending are handled here.
        """
        self.model.train()
        if self.is_main_process:
            logger.info("=" * 60)
            logger.info("Starting v3 training with deep memory integration")
            logger.info("=" * 60)

        last_epoch_loss = None
        for epoch in range(self.config.num_epochs):
            # Reshuffle the distributed sampler deterministically per epoch.
            sampler = getattr(self.train_dataloader, "sampler", None)
            if sampler is not None and hasattr(sampler, "set_epoch"):
                sampler.set_epoch(epoch)
            if self.is_main_process:
                logger.info(f"Epoch {epoch + 1}/{self.config.num_epochs}")

            epoch_loss = 0.0
            num_batches = 0

            # Progress bar only on the main process.
            pbar = self.train_dataloader
            if self.is_main_process:
                pbar = tqdm(
                    self.train_dataloader,
                    desc=f"Epoch {epoch + 1}/{self.config.num_epochs}",
                    leave=False,
                    dynamic_ncols=True,
                )

            for step, batch in enumerate(pbar):
                batch = {k: v.to(self.device) for k, v in batch.items()}

                ga = max(self.config.gradient_accumulation_steps, 1)
                # Only step the optimizer every `ga` batches.
                sync_gradients = ((step + 1) % ga == 0)
                amp_enabled = self.config.fp16 or self.config.bf16
                amp_dtype = torch.float16 if self.config.fp16 else torch.bfloat16

                with torch.amp.autocast(device_type=self.device.type, enabled=amp_enabled, dtype=amp_dtype):
                    if self.config.chunkwise_backward:
                        labels = batch.get("labels")
                        if labels is not None:
                            # Supervised token count over the whole sequence
                            # (after the causal shift), used to normalize the
                            # per-chunk loss sums.
                            total_tokens = int((labels[:, 1:] != -100).sum().item())
                        else:
                            total_tokens = 0
                        loss_scale = 0.0 if total_tokens == 0 else (1.0 / total_tokens / ga)

                        seq_len = batch["input_ids"].shape[1]
                        chunk_size = int(self.config.chunk_size)
                        chunk_ranges = [
                            (start, min(start + chunk_size, seq_len))
                            for start in range(0, seq_len, chunk_size)
                        ]
                        raw_loss_sum = None

                        for idx, (start, end) in enumerate(chunk_ranges):
                            is_last_chunk = (idx == len(chunk_ranges) - 1)
                            # Only allow DDP grad all-reduce on the final
                            # backward of an accumulation boundary.
                            sync_chunk = sync_gradients and is_last_chunk
                            chunk_ctx = (
                                self.model.no_sync
                                if (self.is_distributed and not sync_chunk)
                                else nullcontext
                            )
                            with chunk_ctx():
                                outputs = self.model(
                                    input_ids=batch["input_ids"],
                                    attention_mask=batch["attention_mask"],
                                    labels=labels,
                                    chunk_start=start,
                                    chunk_end=end,
                                    reset_mem_state=(idx == 0),
                                )
                                chunk_loss_sum = outputs["loss_sum"]
                                # Detached running sum, for logging only.
                                if raw_loss_sum is None:
                                    raw_loss_sum = chunk_loss_sum.detach()
                                else:
                                    raw_loss_sum = raw_loss_sum + chunk_loss_sum.detach()

                                # Backward per chunk so activations can be
                                # freed before the next chunk is processed.
                                scaled_loss = chunk_loss_sum * float(loss_scale)
                                if self.config.fp16:
                                    self.scaler.scale(scaled_loss).backward()
                                else:
                                    scaled_loss.backward()

                        if raw_loss_sum is None or total_tokens == 0:
                            raw_loss = torch.zeros((), device=self.device, dtype=torch.float32)
                        else:
                            raw_loss = raw_loss_sum / total_tokens
                        loss = raw_loss / ga
                    else:
                        # Full-sequence path: one forward/backward per batch.
                        ctx = self.model.no_sync if (self.is_distributed and not sync_gradients) else nullcontext
                        with ctx():
                            outputs = self.model(
                                input_ids=batch["input_ids"],
                                attention_mask=batch["attention_mask"],
                                labels=batch["labels"],
                            )
                            raw_loss = outputs["loss"]
                            loss = raw_loss / ga

                            if self.config.fp16:
                                self.scaler.scale(loss).backward()
                            else:
                                loss.backward()

                epoch_loss += raw_loss.detach().float().item()
                num_batches += 1

                if sync_gradients:
                    grad_norm = None
                    if self.config.fp16:
                        # Unscale before clipping so the threshold is in
                        # true-gradient units.
                        self.scaler.unscale_(self.optimizer)
                        grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.max_grad_norm)
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                    else:
                        grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.max_grad_norm)
                        self.optimizer.step()

                    self.scheduler.step()
                    self.optimizer.zero_grad(set_to_none=True)
                    self.global_step += 1

                    if self.is_main_process:
                        avg_loss = epoch_loss / max(num_batches, 1)
                        pbar.set_postfix({"gstep": self.global_step, "loss": f"{avg_loss:.4f}"})

                    if self.global_step % self.config.logging_steps == 0 and self.is_main_process:
                        lr_mem = self._get_group_lr("memory_core") or 0.0
                        lr_mem_attn = self._get_group_lr("memory_attention") or 0.0
                        lr_pre = self._get_group_lr("pretrained") or 0.0
                        grad_note = ""
                        if self.config.debug_grad_norm and grad_norm is not None:
                            grad_note = f" | grad_norm={float(grad_norm):.4f}"
                        logger.info(
                            f"Step {self.global_step} | loss={epoch_loss / max(num_batches, 1):.4f} | "
                            f"lr_mem={lr_mem:.2e} | lr_mem_attn={lr_mem_attn:.2e} | lr_pre={lr_pre:.2e}{grad_note}"
                        )

                    # Periodic mid-epoch evaluation (all ranks participate;
                    # only rank 0 logs).
                    if self.global_step % self.config.eval_steps == 0:
                        eval_metrics = self.evaluate()
                        if self.is_main_process:
                            logger.info(
                                f"Step {self.global_step}: "
                                f"eval_loss={eval_metrics['loss']:.4f}, "
                                f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                                f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
                            )
                        self.model.train()

            # Average the per-rank epoch loss across the world.
            avg_epoch_loss = epoch_loss / max(num_batches, 1)
            if self.is_distributed:
                t = torch.tensor(avg_epoch_loss, device=self.device, dtype=torch.float32)
                dist.all_reduce(t, op=dist.ReduceOp.SUM)
                avg_epoch_loss = (t / self.world_size).item()

            if self.is_main_process:
                logger.info(f"Epoch {epoch + 1} done, avg loss={avg_epoch_loss:.4f}")
            last_epoch_loss = avg_epoch_loss

            # End-of-epoch evaluation and metrics dump.
            eval_metrics = self.evaluate()
            if self.is_main_process:
                logger.info(
                    f"[EPOCH {epoch + 1} EVAL] "
                    f"eval_loss={eval_metrics['loss']:.4f}, "
                    f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                    f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
                )
                self._append_eval_metrics(
                    eval_metrics,
                    phase="epoch",
                    epoch=int(epoch + 1),
                    train_avg_loss=avg_epoch_loss,
                )
            self.model.train()

        if self.is_main_process:
            logger.info("Training done, final evaluation")

        final_eval = self.evaluate(print_examples=int(self.config.final_eval_print_examples))
        if self.is_main_process:
            # Clamp the exponent so degenerate losses don't overflow ppl.
            ppl = float(math.exp(min(20.0, final_eval["loss"])))
            logger.info(
                f"[FINAL EVAL] loss={final_eval['loss']:.4f}, ppl={ppl:.3f}, "
                f"em_acc={final_eval['em_acc'] * 100:.2f}%, "
                f"tok_acc={final_eval['tok_acc'] * 100:.2f}%"
            )
            logger.info("Saving final checkpoint")
            self._append_eval_metrics(
                final_eval,
                phase="final",
                epoch=int(self.config.num_epochs),
                train_avg_loss=last_epoch_loss,
            )
            self.save_final_checkpoint()
1220
+
1221
+ @torch.no_grad()
1222
+ def evaluate(self, print_examples: int = 0) -> Dict[str, float]:
1223
+ self.model.eval()
1224
+ total_loss = torch.tensor(0.0, device=self.device, dtype=torch.float32)
1225
+ total_batches = torch.tensor(0.0, device=self.device, dtype=torch.float32)
1226
+
1227
+ total_tok_correct = torch.tensor(0.0, device=self.device, dtype=torch.float32)
1228
+ total_tok_total = torch.tensor(0.0, device=self.device, dtype=torch.float32)
1229
+ total_em_correct = torch.tensor(0.0, device=self.device, dtype=torch.float32)
1230
+ total_em_total = torch.tensor(0.0, device=self.device, dtype=torch.float32)
1231
+ printed = 0
1232
+
1233
+ for batch in self.eval_dataloader:
1234
+ batch = {k: v.to(self.device) for k, v in batch.items()}
1235
+ amp_enabled = self.config.fp16 or self.config.bf16
1236
+ amp_dtype = torch.float16 if self.config.fp16 else torch.bfloat16
1237
+ with torch.amp.autocast(device_type=self.device.type, enabled=amp_enabled, dtype=amp_dtype):
1238
+ outputs = self.model(
1239
+ input_ids=batch["input_ids"],
1240
+ attention_mask=batch["attention_mask"],
1241
+ labels=batch["labels"],
1242
+ return_pred_tokens=True,
1243
+ topk=int(self.config.eval_topk) if self.config.eval_topk else 0,
1244
+ )
1245
+
1246
+ if torch.isfinite(outputs["loss"]):
1247
+ total_loss += outputs["loss"].detach().float()
1248
+ total_batches += 1.0
1249
+
1250
+ pred_ids = outputs.get("pred_ids", None)
1251
+ target_ids = outputs.get("target_ids", None)
1252
+ lengths = outputs.get("target_lengths", None)
1253
+
1254
+ if (
1255
+ pred_ids is not None
1256
+ and target_ids is not None
1257
+ and lengths is not None
1258
+ and pred_ids.ndim == 2
1259
+ and target_ids.ndim == 2
1260
+ and lengths.ndim == 1
1261
+ and pred_ids.shape == target_ids.shape
1262
+ and pred_ids.shape[0] == lengths.shape[0]
1263
+ ):
1264
+ pred_cpu = pred_ids.to("cpu", dtype=torch.long)
1265
+ tgt_cpu = target_ids.to("cpu", dtype=torch.long)
1266
+ len_cpu = lengths.to("cpu", dtype=torch.long)
1267
+
1268
+ for i in range(int(len_cpu.shape[0])):
1269
+ L = int(len_cpu[i].item())
1270
+ if L <= 0:
1271
+ continue
1272
+ p = pred_cpu[i, :L]
1273
+ t = tgt_cpu[i, :L]
1274
+
1275
+ total_tok_correct += torch.tensor(float((p == t).sum().item()), device=self.device, dtype=torch.float32)
1276
+ total_tok_total += torch.tensor(float(L), device=self.device, dtype=torch.float32)
1277
+
1278
+ if self.tokenizer is not None:
1279
+ pred_text = self.tokenizer.decode(p.tolist(), skip_special_tokens=True).strip()
1280
+ tgt_text = self.tokenizer.decode(t.tolist(), skip_special_tokens=True).strip()
1281
+ em = float(pred_text == tgt_text)
1282
+ total_em_correct += torch.tensor(em, device=self.device, dtype=torch.float32)
1283
+ total_em_total += torch.tensor(1.0, device=self.device, dtype=torch.float32)
1284
+
1285
+ if self.is_main_process and printed < print_examples:
1286
+ logger.info(f"[EVAL SAMPLE] pred={repr(pred_text)} | label={repr(tgt_text)} | match={bool(em)}")
1287
+ printed += 1
1288
+
1289
+ if self.is_distributed:
1290
+ dist.all_reduce(total_loss, op=dist.ReduceOp.SUM)
1291
+ dist.all_reduce(total_batches, op=dist.ReduceOp.SUM)
1292
+ dist.all_reduce(total_tok_correct, op=dist.ReduceOp.SUM)
1293
+ dist.all_reduce(total_tok_total, op=dist.ReduceOp.SUM)
1294
+ dist.all_reduce(total_em_correct, op=dist.ReduceOp.SUM)
1295
+ dist.all_reduce(total_em_total, op=dist.ReduceOp.SUM)
1296
+
1297
+ avg_loss = (total_loss / total_batches.clamp(min=1.0)).item()
1298
+ tok_acc = (total_tok_correct / total_tok_total.clamp(min=1.0)).item()
1299
+ em_acc = (total_em_correct / total_em_total.clamp(min=1.0)).item()
1300
+
1301
+ return {"loss": avg_loss, "tok_acc": tok_acc, "em_acc": em_acc}
1302
+
1303
+ def _append_eval_metrics(
1304
+ self,
1305
+ metrics: Dict[str, float],
1306
+ *,
1307
+ phase: str,
1308
+ epoch: Optional[int],
1309
+ train_avg_loss: Optional[float],
1310
+ ) -> None:
1311
+ if not self.is_main_process:
1312
+ return
1313
+ os.makedirs(self.config.output_dir, exist_ok=True)
1314
+ record = {
1315
+ "phase": phase,
1316
+ "epoch": epoch,
1317
+ "global_step": int(self.global_step),
1318
+ "train_avg_loss": None if train_avg_loss is None else float(train_avg_loss),
1319
+ "eval_loss": float(metrics.get("loss", 0.0)),
1320
+ "em_acc_pct": float(metrics.get("em_acc", 0.0) * 100.0),
1321
+ "tok_acc_pct": float(metrics.get("tok_acc", 0.0) * 100.0),
1322
+ }
1323
+ metrics_path = os.path.join(self.config.output_dir, "eval_metrics.jsonl")
1324
+ with open(metrics_path, "a") as f:
1325
+ f.write(json.dumps(record) + "\n")
1326
+
1327
+ def save_final_checkpoint(self):
1328
+ ckpt_path = os.path.join(self.config.output_dir, self.config.final_ckpt_name)
1329
+ base_model = unwrap_model(self.model)
1330
+
1331
+ # Save memory-related parameters
1332
+ memory_sd = {
1333
+ name: p.detach().cpu()
1334
+ for name, p in base_model.named_parameters()
1335
+ if ("neural_memory" in name) or ("mem_gate" in name) or
1336
+ ("memory_attention" in name) or ("memory_context_proj" in name)
1337
+ }
1338
+
1339
+ if is_fsdp_model(self.model) and len(memory_sd) == 0:
1340
+ from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
1341
+ full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
1342
+ with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, full_cfg):
1343
+ full_sd = self.model.state_dict()
1344
+ memory_sd = {
1345
+ k: v for k, v in full_sd.items()
1346
+ if ("neural_memory" in k) or ("mem_gate" in k) or
1347
+ ("memory_attention" in k) or ("memory_context_proj" in k)
1348
+ }
1349
+
1350
+ if self.is_main_process:
1351
+ torch.save(
1352
+ {"memory_state_dict": memory_sd, "global_step": self.global_step, "config": asdict(self.config)},
1353
+ ckpt_path,
1354
+ )
1355
+ logger.info(f"Saved memory checkpoint: {ckpt_path}")
1356
+ if self.is_distributed:
1357
+ dist.barrier()
1358
+
1359
+ if self.config.save_full_checkpoint:
1360
+ full_ckpt_path = os.path.join(self.config.output_dir, self.config.final_full_ckpt_name)
1361
+ if is_fsdp_model(self.model):
1362
+ from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
1363
+ full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
1364
+ with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, full_cfg):
1365
+ full_sd = self.model.state_dict()
1366
+ else:
1367
+ full_sd = unwrap_model(self.model).state_dict()
1368
+
1369
+ if self.is_main_process:
1370
+ torch.save(
1371
+ {"model_state_dict": full_sd, "global_step": self.global_step, "config": asdict(self.config)},
1372
+ full_ckpt_path,
1373
+ )
1374
+ logger.info(f"Saved full checkpoint: {full_ckpt_path}")
1375
+ if self.is_distributed:
1376
+ dist.barrier()
1377
+
1378
+
1379
+ # =============================================================================
1380
+ # Main
1381
+ # =============================================================================
1382
+
1383
+ def main():
1384
+ from transformers import AutoModelForCausalLM, AutoTokenizer
1385
+
1386
+ parser = argparse.ArgumentParser(description="Qwen3 + Titans v3 Deep Integration Training")
1387
+ parser.add_argument("--fsdp", action="store_true")
1388
+ parser.add_argument("--eval_only", action="store_true")
1389
+ parser.add_argument("--ckpt_path", type=str, default=None)
1390
+ parser.add_argument("--max_samples", type=int, default=None)
1391
+ parser.add_argument("--max_length", type=int, default=None)
1392
+ parser.add_argument("--output_dir", type=str, default=None)
1393
+ parser.add_argument("--num_epochs", type=int, default=None)
1394
+ parser.add_argument("--eval_steps", type=int, default=None)
1395
+ parser.add_argument("--batch_size", type=int, default=None)
1396
+ parser.add_argument("--gradient_accumulation_steps", type=int, default=None)
1397
+ parser.add_argument("--chunk_size", type=int, default=None)
1398
+ parser.add_argument("--memory_layer_stride", type=int, default=None)
1399
+ parser.add_argument("--no_memory", action="store_true")
1400
+ parser.add_argument("--gradient_checkpointing", action="store_true")
1401
+ parser.add_argument("--no_chunkwise_backward", action="store_true")
1402
+
1403
+ # v3 specific arguments
1404
+ parser.add_argument("--detach_mem_state", action="store_true",
1405
+ help="Detach memory state (disable cross-chunk gradients)")
1406
+ parser.add_argument("--no_deep_integration", action="store_true",
1407
+ help="Disable deep attention integration")
1408
+ parser.add_argument("--no_memory_as_context", action="store_true",
1409
+ help="Disable memory-as-context projection")
1410
+ parser.add_argument("--cross_chunk_gradient_steps", type=int, default=None,
1411
+ help="Number of chunks to allow gradient flow through")
1412
+ parser.add_argument("--memory_depth", type=int, default=None)
1413
+ parser.add_argument("--num_memory_tokens", type=int, default=None)
1414
+
1415
+ parser.add_argument("--debug_grad_norm", action="store_true")
1416
+ args = parser.parse_args()
1417
+
1418
+ config = TrainingConfig()
1419
+
1420
+ # Apply arguments
1421
+ if args.fsdp:
1422
+ config.use_fsdp = True
1423
+ if args.no_memory:
1424
+ config.use_memory = False
1425
+ if args.max_samples is not None:
1426
+ config.max_samples = args.max_samples
1427
+ if args.max_length is not None:
1428
+ config.max_length = int(args.max_length)
1429
+ if args.output_dir is not None:
1430
+ config.output_dir = args.output_dir
1431
+ elif not config.use_memory:
1432
+ config.output_dir = "./outputs/qwen_babilong_no_memory_v3"
1433
+ if args.num_epochs is not None:
1434
+ config.num_epochs = args.num_epochs
1435
+ if args.eval_steps is not None:
1436
+ config.eval_steps = args.eval_steps
1437
+ if args.batch_size is not None:
1438
+ config.batch_size = int(args.batch_size)
1439
+ if args.gradient_accumulation_steps is not None:
1440
+ config.gradient_accumulation_steps = int(args.gradient_accumulation_steps)
1441
+ if args.chunk_size is not None:
1442
+ config.chunk_size = int(args.chunk_size)
1443
+ if args.memory_layer_stride is not None:
1444
+ config.memory_layer_stride = int(args.memory_layer_stride)
1445
+ if args.gradient_checkpointing:
1446
+ config.gradient_checkpointing = True
1447
+ if args.no_chunkwise_backward:
1448
+ config.chunkwise_backward = False
1449
+
1450
+ # v3 specific
1451
+ if args.detach_mem_state:
1452
+ config.detach_mem_state = True
1453
+ if args.no_deep_integration:
1454
+ config.deep_memory_integration = False
1455
+ if args.no_memory_as_context:
1456
+ config.memory_as_context = False
1457
+ if args.cross_chunk_gradient_steps is not None:
1458
+ config.cross_chunk_gradient_steps = int(args.cross_chunk_gradient_steps)
1459
+ if args.memory_depth is not None:
1460
+ config.memory_depth = int(args.memory_depth)
1461
+ if args.num_memory_tokens is not None:
1462
+ config.num_memory_tokens = int(args.num_memory_tokens)
1463
+ if args.debug_grad_norm:
1464
+ config.debug_grad_norm = True
1465
+
1466
+ is_distributed, rank, local_rank, world_size = init_distributed()
1467
+ is_main = (rank == 0)
1468
+
1469
+ if config.use_fsdp and config.chunkwise_backward:
1470
+ if is_main:
1471
+ logger.warning("chunkwise_backward is incompatible with FSDP; disabling it.")
1472
+ config.chunkwise_backward = False
1473
+
1474
+ if is_distributed and (not config.use_fsdp) and config.gradient_checkpointing:
1475
+ config.gradient_checkpointing = False
1476
+ if is_main:
1477
+ logger.warning("gradient_checkpointing is unstable with DDP here; disabling it.")
1478
+
1479
+ if is_distributed and (not config.use_fsdp):
1480
+ if not config.ddp_find_unused_parameters:
1481
+ config.ddp_find_unused_parameters = True
1482
+ if is_main:
1483
+ logger.warning("Enabling DDP find_unused_parameters.")
1484
+
1485
+ torch.manual_seed(config.seed + rank)
1486
+
1487
+ if torch.cuda.is_available():
1488
+ device = torch.device(f"cuda:{local_rank}" if is_distributed else "cuda")
1489
+ else:
1490
+ device = torch.device("cpu")
1491
+
1492
+ if torch.cuda.is_available() and config.bf16:
1493
+ bf16_supported = False
1494
+ try:
1495
+ bf16_supported = torch.cuda.is_bf16_supported()
1496
+ except Exception:
1497
+ bf16_supported = False
1498
+ if not bf16_supported:
1499
+ if is_main:
1500
+ logger.warning("bf16 not supported; falling back to fp16.")
1501
+ config.bf16 = False
1502
+ if not config.fp16:
1503
+ config.fp16 = True
1504
+
1505
+ if torch.cuda.is_available() and getattr(config, "use_tf32", False):
1506
+ torch.backends.cuda.matmul.allow_tf32 = True
1507
+ torch.backends.cudnn.allow_tf32 = True
1508
+ try:
1509
+ torch.set_float32_matmul_precision("high")
1510
+ except Exception:
1511
+ pass
1512
+
1513
+ if is_main:
1514
+ logger.info("=" * 70)
1515
+ logger.info("Qwen3-4B + Titans v3 DEEP INTEGRATION Training")
1516
+ logger.info("=" * 70)
1517
+ logger.info(f"distributed={is_distributed}, world_size={world_size}")
1518
+ logger.info(f"model_path={config.model_path}")
1519
+ logger.info(f"data_path={config.data_path}")
1520
+ logger.info(f"output_dir={config.output_dir}")
1521
+ logger.info(f"max_samples={config.max_samples}")
1522
+ logger.info(f"max_length={config.max_length}")
1523
+ logger.info(f"num_epochs={config.num_epochs}")
1524
+ logger.info(f"chunk_size={config.chunk_size}")
1525
+ logger.info(f"use_memory={config.use_memory}")
1526
+ if config.use_memory:
1527
+ logger.info(f"memory_layer_stride={config.memory_layer_stride}")
1528
+ logger.info(f"memory_depth={config.memory_depth}")
1529
+ logger.info(f"deep_memory_integration={config.deep_memory_integration}")
1530
+ logger.info(f"memory_as_context={config.memory_as_context}")
1531
+ logger.info(f"detach_mem_state={config.detach_mem_state}")
1532
+ logger.info(f"cross_chunk_gradient_steps={config.cross_chunk_gradient_steps}")
1533
+ logger.info(f"num_memory_tokens={config.num_memory_tokens}")
1534
+
1535
+ tokenizer = AutoTokenizer.from_pretrained(config.model_path, trust_remote_code=True)
1536
+ if tokenizer.pad_token is None:
1537
+ tokenizer.pad_token = tokenizer.eos_token
1538
+
1539
+ # Disable flash-attn detection (torchao already disabled at top of file)
1540
+ try:
1541
+ import transformers
1542
+ from transformers.utils import import_utils as _import_utils
1543
+
1544
+ def _disabled(*args, **kwargs):
1545
+ return False
1546
+
1547
+ _import_utils.is_flash_attn_2_available = _disabled
1548
+ if hasattr(transformers, "utils") and hasattr(transformers.utils, "is_flash_attn_2_available"):
1549
+ transformers.utils.is_flash_attn_2_available = _disabled
1550
+ if hasattr(_import_utils, "is_torchao_available"):
1551
+ _import_utils.is_torchao_available = _disabled
1552
+ if hasattr(_import_utils, "is_torchvision_available"):
1553
+ _import_utils.is_torchvision_available = _disabled
1554
+ except Exception as e:
1555
+ if is_main:
1556
+ logger.warning(f"Disable checks failed (ignored): {e}")
1557
+
1558
+ torch_dtype = torch.bfloat16 if config.bf16 else (torch.float16 if config.fp16 else torch.float32)
1559
+
1560
+ qwen_model = AutoModelForCausalLM.from_pretrained(
1561
+ config.model_path,
1562
+ torch_dtype=torch_dtype,
1563
+ device_map=None,
1564
+ trust_remote_code=True,
1565
+ attn_implementation="sdpa",
1566
+ low_cpu_mem_usage=True,
1567
+ )
1568
+ qwen_model.to(device)
1569
+ qwen_model.config.use_cache = False
1570
+ if config.gradient_checkpointing and hasattr(qwen_model, "gradient_checkpointing_enable"):
1571
+ qwen_model.gradient_checkpointing_enable()
1572
+
1573
+ train_dataset = BABILongDataset(
1574
+ config.data_path,
1575
+ tokenizer,
1576
+ max_length=config.max_length,
1577
+ answer_reserve_tokens=config.answer_reserve_tokens,
1578
+ label_prefix_tokens=config.label_prefix_tokens,
1579
+ max_samples=config.max_samples,
1580
+ )
1581
+
1582
+ train_size = int(0.9 * len(train_dataset))
1583
+ eval_size = len(train_dataset) - train_size
1584
+ train_dataset, eval_dataset = torch.utils.data.random_split(
1585
+ train_dataset,
1586
+ [train_size, eval_size],
1587
+ generator=torch.Generator().manual_seed(config.seed),
1588
+ )
1589
+
1590
+ train_sampler = None
1591
+ eval_sampler = None
1592
+ if is_distributed:
1593
+ from torch.utils.data.distributed import DistributedSampler
1594
+ train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank, shuffle=True, seed=config.seed)
1595
+ eval_sampler = DistributedSampler(eval_dataset, num_replicas=world_size, rank=rank, shuffle=False)
1596
+
1597
+ train_dataloader = DataLoader(
1598
+ train_dataset,
1599
+ batch_size=config.batch_size,
1600
+ shuffle=(train_sampler is None),
1601
+ sampler=train_sampler,
1602
+ collate_fn=collate_fn,
1603
+ num_workers=0,
1604
+ )
1605
+ eval_dataloader = DataLoader(
1606
+ eval_dataset,
1607
+ batch_size=config.batch_size,
1608
+ shuffle=False,
1609
+ sampler=eval_sampler,
1610
+ collate_fn=collate_fn,
1611
+ num_workers=0,
1612
+ )
1613
+
1614
+ model = QwenTitansForBABILongV3(qwen_model, config)
1615
+ model.to(device)
1616
+
1617
+ if is_distributed:
1618
+ if config.use_fsdp:
1619
+ from functools import partial
1620
+ from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, MixedPrecision
1621
+ from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
1622
+ from transformers.models.qwen3.modeling_qwen3 import Qwen3DecoderLayer
1623
+
1624
+ mp_policy = MixedPrecision(param_dtype=torch_dtype, reduce_dtype=torch_dtype, buffer_dtype=torch_dtype)
1625
+ auto_wrap = partial(
1626
+ transformer_auto_wrap_policy,
1627
+ transformer_layer_cls={Qwen3DecoderLayer, QwenDecoderLayerWithDeepMemory}
1628
+ )
1629
+
1630
+ model = FSDP(
1631
+ model,
1632
+ auto_wrap_policy=auto_wrap,
1633
+ mixed_precision=mp_policy,
1634
+ device_id=torch.cuda.current_device(),
1635
+ use_orig_params=config.fsdp_use_orig_params,
1636
+ ignored_modules=model.get_memory_modules(),
1637
+ )
1638
+ else:
1639
+ model = DDP(
1640
+ model,
1641
+ device_ids=[local_rank],
1642
+ output_device=local_rank,
1643
+ find_unused_parameters=config.ddp_find_unused_parameters,
1644
+ )
1645
+
1646
+ trainer = Trainer(
1647
+ model=model,
1648
+ train_dataloader=train_dataloader,
1649
+ eval_dataloader=eval_dataloader,
1650
+ config=config,
1651
+ rank=rank,
1652
+ world_size=world_size,
1653
+ is_distributed=is_distributed,
1654
+ tokenizer=tokenizer,
1655
+ )
1656
+
1657
+ if args.eval_only:
1658
+ ckpt_path = args.ckpt_path or os.path.join(config.output_dir, config.final_ckpt_name)
1659
+ if is_main:
1660
+ logger.info(f"eval_only: loading checkpoint: {ckpt_path}")
1661
+ ckpt = torch.load(ckpt_path, map_location="cpu")
1662
+
1663
+ memory_sd = ckpt.get("memory_state_dict", {})
1664
+ if len(memory_sd) > 0:
1665
+ unwrap_model(model).load_state_dict(memory_sd, strict=False)
1666
+
1667
+ eval_metrics = trainer.evaluate()
1668
+ if is_main:
1669
+ ppl = float(math.exp(min(20.0, eval_metrics["loss"])))
1670
+ logger.info(
1671
+ f"[EVAL] loss={eval_metrics['loss']:.4f}, ppl={ppl:.3f}, "
1672
+ f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
1673
+ f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
1674
+ )
1675
+ cleanup_distributed()
1676
+ return
1677
+
1678
+ trainer.train()
1679
+ cleanup_distributed()
1680
+
1681
+
1682
+ if __name__ == "__main__":
1683
+ main()
fig1.png ADDED

Git LFS Details

  • SHA256: 0850659db065e7f2a7d5f9c7f12b6b3796e994d2d73f2d4064bbc551bd161bf4
  • Pointer size: 131 Bytes
  • Size of remote file: 173 kB
fig2.png ADDED

Git LFS Details

  • SHA256: 2fc6bbd15aaceddc118e40a4a619528e40b200ebee210e12e590fa0d0332ce1f
  • Pointer size: 131 Bytes
  • Size of remote file: 415 kB
outputs/freeze_base_500/eval_metrics.jsonl ADDED
@@ -0,0 +1 @@
 
 
1
+ {"phase": "epoch", "epoch": 1, "global_step": 7, "train_avg_loss": 15.339897155761719, "eval_loss": 9.975086212158203, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
outputs/freeze_base_500_v2/eval_metrics.jsonl ADDED
@@ -0,0 +1 @@
 
 
1
+ {"phase": "epoch", "epoch": 1, "global_step": 7, "train_avg_loss": 15.48836898803711, "eval_loss": 10.238042831420898, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
outputs/freeze_base_500_v4/eval_metrics.jsonl ADDED
@@ -0,0 +1 @@
 
 
1
+ {"phase": "epoch", "epoch": 1, "global_step": 7, "train_avg_loss": 15.516389846801758, "eval_loss": 10.93748950958252, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
outputs/freeze_base_500_v5/eval_metrics.jsonl ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {"phase": "epoch", "epoch": 1, "global_step": 7, "train_avg_loss": 15.43581771850586, "eval_loss": 10.169279098510742, "em_acc_pct": 1.9230769947171211, "tok_acc_pct": 1.9230769947171211}
2
+ {"phase": "epoch", "epoch": 2, "global_step": 14, "train_avg_loss": 6.823464870452881, "eval_loss": 4.242016792297363, "em_acc_pct": 23.076923191547394, "tok_acc_pct": 23.076923191547394}
3
+ {"phase": "epoch", "epoch": 3, "global_step": 21, "train_avg_loss": 2.964919328689575, "eval_loss": 2.7159345149993896, "em_acc_pct": 23.076923191547394, "tok_acc_pct": 23.076923191547394}
4
+ {"phase": "epoch", "epoch": 4, "global_step": 28, "train_avg_loss": 1.9492820501327515, "eval_loss": 2.363089084625244, "em_acc_pct": 28.846153616905212, "tok_acc_pct": 28.846153616905212}
5
+ {"phase": "epoch", "epoch": 5, "global_step": 35, "train_avg_loss": 1.6384100914001465, "eval_loss": 2.0535738468170166, "em_acc_pct": 26.923078298568726, "tok_acc_pct": 26.923078298568726}
6
+ {"phase": "epoch", "epoch": 6, "global_step": 42, "train_avg_loss": 1.5474846363067627, "eval_loss": 2.0488321781158447, "em_acc_pct": 25.0, "tok_acc_pct": 25.0}
7
+ {"phase": "epoch", "epoch": 7, "global_step": 49, "train_avg_loss": 1.438281774520874, "eval_loss": 2.1996049880981445, "em_acc_pct": 23.076923191547394, "tok_acc_pct": 23.076923191547394}
outputs/full_finetune_forwardmem_v2/eval_metrics.jsonl ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {"phase": "epoch", "epoch": 1, "global_step": 7, "train_avg_loss": 12.47234058380127, "eval_loss": 6.324646949768066, "em_acc_pct": 13.461539149284363, "tok_acc_pct": 13.461539149284363}
2
+ {"phase": "epoch", "epoch": 2, "global_step": 14, "train_avg_loss": 4.019615173339844, "eval_loss": 3.0129523277282715, "em_acc_pct": 23.076923191547394, "tok_acc_pct": 23.076923191547394}
3
+ {"phase": "epoch", "epoch": 3, "global_step": 21, "train_avg_loss": 2.1279730796813965, "eval_loss": 2.2121071815490723, "em_acc_pct": 28.846153616905212, "tok_acc_pct": 28.846153616905212}
4
+ {"phase": "epoch", "epoch": 4, "global_step": 28, "train_avg_loss": 1.7763179540634155, "eval_loss": 2.294826030731201, "em_acc_pct": 25.0, "tok_acc_pct": 25.0}
outputs/qwen_ ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ W0127 11:26:50.717000 3370624 site-packages/torch/distributed/run.py:803]
2
+ W0127 11:26:50.717000 3370624 site-packages/torch/distributed/run.py:803] *****************************************
3
+ W0127 11:26:50.717000 3370624 site-packages/torch/distributed/run.py:803] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
4
+ W0127 11:26:50.717000 3370624 site-packages/torch/distributed/run.py:803] *****************************************
5
+ 2026-01-27 11:26:55,908 - WARNING - chunkwise_backward is incompatible with FSDP; disabling it.
6
+ 2026-01-27 11:26:55,909 - INFO - ============================================================
7
+ 2026-01-27 11:26:55,909 - INFO - Qwen3-4B + Titans training (DDP/FSDP)
8
+ 2026-01-27 11:26:55,909 - INFO - ============================================================
9
+ 2026-01-27 11:26:55,909 - INFO - distributed=True, world_size=4, use_fsdp=True
10
+ 2026-01-27 11:26:55,909 - INFO - mode=TRAIN
11
+ 2026-01-27 11:26:55,909 - INFO - model_path=/data/huangyifei/huggingface_cache/hub/models--Qwen--Qwen3-4B-Instruct-2507/snapshots/cdbee75f17c01a7cc42f958dc650907174af0554
12
+ 2026-01-27 11:26:55,909 - INFO - data_path=/data/yty/BABILong/babilong-train-5k-samples/data/qa1/32k.json
13
+ 2026-01-27 11:26:55,909 - INFO - output_dir=./outputs/qwen_babilong_no_memory
14
+ 2026-01-27 11:26:55,909 - INFO - max_samples=2500
15
+ 2026-01-27 11:26:55,909 - INFO - max_length=32768
16
+ 2026-01-27 11:26:55,909 - INFO - chunk_size=4096
17
+ 2026-01-27 11:26:55,909 - INFO - use_memory=False
18
+ 2026-01-27 11:26:55,909 - INFO - chunkwise_backward=False
19
+ 2026-01-27 11:26:55,909 - INFO - label_prefix_tokens=0
20
+ 2026-01-27 11:26:55,909 - INFO - detach_mem_state=True
21
+
22
+
23
+
24
+
25
+ 2026-01-27 11:27:28,081 - INFO - Loading dataset: /data/yty/BABILong/babilong-train-5k-samples/data/qa1/32k.json
26
+ 2026-01-27 11:27:28,095 - INFO - Loading dataset: /data/yty/BABILong/babilong-train-5k-samples/data/qa1/32k.json
27
+ 2026-01-27 11:27:28,096 - INFO - Loading dataset: /data/yty/BABILong/babilong-train-5k-samples/data/qa1/32k.json
28
+ 2026-01-27 11:27:28,097 - INFO - Loading dataset: /data/yty/BABILong/babilong-train-5k-samples/data/qa1/32k.json
29
+ 2026-01-27 11:27:32,272 - INFO - Dataset size: 2500
30
+ 2026-01-27 11:27:32,273 - INFO - [QwenTitansForBABILong] Initialized (memory disabled)
31
+ 2026-01-27 11:27:32,273 - INFO - - hidden_size: 2560
32
+ 2026-01-27 11:27:32,273 - INFO - - chunk_size: 4096
33
+ 2026-01-27 11:27:32,308 - INFO - Dataset size: 2500
34
+ 2026-01-27 11:27:32,309 - INFO - [QwenTitansForBABILong] Initialized (memory disabled)
35
+ 2026-01-27 11:27:32,309 - INFO - - hidden_size: 2560
36
+ 2026-01-27 11:27:32,309 - INFO - - chunk_size: 4096
37
+ 2026-01-27 11:27:32,347 - INFO - Param groups: memory=0, pretrained=398
38
+ /root/githubs/titans-pytorch/examples/train_qwen_titans_babilong.py:768: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.
39
+ self.scaler = torch.cuda.amp.GradScaler(enabled=config.fp16)
40
+ 2026-01-27 11:27:32,349 - INFO - Start training
41
+ 2026-01-27 11:27:32,349 - INFO - Epoch 1/3
42
+
43
+ /root/githubs/titans-pytorch/examples/train_qwen_titans_babilong.py:768: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.
44
+ self.scaler = torch.cuda.amp.GradScaler(enabled=config.fp16)
45
+ 2026-01-27 11:27:32,442 - INFO - Dataset size: 2500
46
+ 2026-01-27 11:27:32,444 - INFO - [QwenTitansForBABILong] Initialized (memory disabled)
47
+ 2026-01-27 11:27:32,444 - INFO - - hidden_size: 2560
48
+ 2026-01-27 11:27:32,444 - INFO - - chunk_size: 4096
49
+ 2026-01-27 11:27:32,462 - INFO - Dataset size: 2500
50
+ 2026-01-27 11:27:32,466 - INFO - [QwenTitansForBABILong] Initialized (memory disabled)
51
+ 2026-01-27 11:27:32,466 - INFO - - hidden_size: 2560
52
+ 2026-01-27 11:27:32,466 - INFO - - chunk_size: 4096
53
+ 2026-01-27 11:27:32,519 - INFO - Param groups: memory=0, pretrained=398
54
+ /root/githubs/titans-pytorch/examples/train_qwen_titans_babilong.py:768: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.
55
+ self.scaler = torch.cuda.amp.GradScaler(enabled=config.fp16)
56
+ 2026-01-27 11:27:32,544 - INFO - Param groups: memory=0, pretrained=398
57
+ /root/githubs/titans-pytorch/examples/train_qwen_titans_babilong.py:768: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.
58
+ self.scaler = torch.cuda.amp.GradScaler(enabled=config.fp16)
59
+
60
+
61
+
62
+ Traceback (most recent call last):
63
+ File "/root/miniforge/lib/python3.12/subprocess.py", line 1990, in _internal_poll
64
+ pid, sts = _waitpid(self.pid, _WNOHANG)
65
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
66
+ File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 85, in _terminate_process_handler
67
+ sigval = signal.Signals(signum)
68
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
69
+ torch.distributed.elastic.multiprocessing.api.SignalException: Process 3370624 got signal: 1
70
+
71
+ During handling of the above exception, another exception occurred:
72
+
73
+ Traceback (most recent call last):
74
+ File "/root/miniforge/bin/torchrun", line 7, in <module>
75
+ sys.exit(main())
76
+ ^^^^^^
77
+ File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 357, in wrapper
78
+ @wraps(f)
79
+
80
+ File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/run.py", line 936, in main
81
+ if args.module:
82
+ ^^^^^
83
+ File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/run.py", line 927, in run
84
+ cmd_args.append(args.training_script)
85
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
86
+ File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 156, in __call__
87
+ # entrypoint is a command and ``script.py`` is the python module.
88
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
89
+ File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 284, in launch_agent
90
+ rdzv_handler=rdzv_registry.get_rendezvous_handler(rdzv_parameters),
91
+ ^^^^^^^^^^^
92
+ File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/elastic/metrics/api.py", line 138, in wrapper
93
+ raise
94
+
95
+ File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/elastic/agent/server/api.py", line 717, in run
96
+ """Restart (stops, rendezvous, starts) all local workers in the group."""
97
+ ^^^^^^^^^^^^^^^^^^^^^^
98
+ File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/elastic/agent/server/api.py", line 882, in _invoke_run
99
+ flakiness = 100.0
100
+ ^^^^^
101
+ File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/elastic/metrics/api.py", line 138, in wrapper
102
+ raise
103
+
104
+ File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/elastic/agent/server/local_elastic_agent.py", line 389, in _monitor_workers
105
+
106
+ File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 537, in wait
107
+ "TORCHELASTIC_SIGNALS_TO_HANDLE", "SIGTERM,SIGINT,SIGHUP,SIGQUIT"
108
+ ^^^^^^^^^^^^
109
+ File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 858, in _poll
110
+ def pids(self) -> dict[int, int]:
111
+ ^^^^^^^^^^^^^^^
112
+ File "/root/miniforge/lib/python3.12/subprocess.py", line 1236, in poll
113
+ return self._internal_poll()
114
+ ^^^^^^^^^^^^^^^^^^^^^
115
+ File "/root/miniforge/lib/python3.12/subprocess.py", line 2004, in _internal_poll
116
+ self._waitpid_lock.release()
117
+ File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 85, in _terminate_process_handler
118
+ sigval = signal.Signals(signum)
119
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
120
+ torch.distributed.elastic.multiprocessing.api.SignalException: Process 3370624 got signal: 15
121
+ [W128 06:35:14.145474637 AllocatorConfig.cpp:28] Warning: PYTORCH_CUDA_ALLOC_CONF is deprecated, use PYTORCH_ALLOC_CONF instead (function operator())
outputs/qwen_babilong_no_memory/eval_metrics.jsonl ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {"phase": "epoch", "epoch": 1, "global_step": 17, "train_avg_loss": 5.368525505065918, "eval_loss": 1.7918081283569336, "em_acc_pct": 29.36508059501648, "tok_acc_pct": 29.36508059501648}
2
+ {"phase": "epoch", "epoch": 2, "global_step": 34, "train_avg_loss": 1.7324517965316772, "eval_loss": 1.672330379486084, "em_acc_pct": 31.34920597076416, "tok_acc_pct": 31.34920597076416}
3
+ {"phase": "epoch", "epoch": 3, "global_step": 51, "train_avg_loss": 1.6770515441894531, "eval_loss": 1.6550273895263672, "em_acc_pct": 30.95238208770752, "tok_acc_pct": 30.95238208770752}
4
+ {"phase": "final", "epoch": 3, "global_step": 51, "train_avg_loss": 1.6770515441894531, "eval_loss": 1.6550273895263672, "em_acc_pct": 30.95238208770752, "tok_acc_pct": 30.95238208770752}
outputs/qwen_baseline_babilong_v4_ga2_4gpu/eval_metrics.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {"phase": "epoch", "epoch": 1, "global_step": 56, "train_avg_loss": 6.816544055938721, "eval_loss": 2.1255977153778076, "em_acc_pct": 23.076923191547394, "tok_acc_pct": 23.076923191547394}
2
+ {"phase": "epoch", "epoch": 2, "global_step": 112, "train_avg_loss": 1.799431562423706, "eval_loss": 2.0042293071746826, "em_acc_pct": 15.384615957736969, "tok_acc_pct": 15.384615957736969}
3
+ {"phase": "epoch", "epoch": 3, "global_step": 168, "train_avg_loss": 1.7182527780532837, "eval_loss": 2.104633331298828, "em_acc_pct": 15.384615957736969, "tok_acc_pct": 15.384615957736969}
outputs/qwen_baseline_eval/baseline_results_eval.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "split": "eval",
3
+ "total_samples": 50,
4
+ "total_tokens": 50,
5
+ "loss": 18.038174209594725,
6
+ "perplexity": 68214943.35535261,
7
+ "tok_acc_pct": 0.0,
8
+ "em_acc_pct": 0.0,
9
+ "config": {
10
+ "model_path": "/data/huangyifei/huggingface_cache/hub/models--Qwen--Qwen3-4B-Instruct-2507/snapshots/cdbee75f17c01a7cc42f958dc650907174af0554",
11
+ "data_path": "/data/yty/BABILong/babilong-train-5k-samples/data/qa1/32k.json",
12
+ "max_samples": 500,
13
+ "max_length": 32768,
14
+ "chunk_size": 8192
15
+ }
16
+ }
outputs/qwen_titans_babilong/eval_metrics.jsonl ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {"phase": "epoch", "epoch": 1, "global_step": 17, "train_avg_loss": 0.3730778992176056, "eval_loss": 11.931214332580566, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
2
+ {"phase": "epoch", "epoch": 2, "global_step": 34, "train_avg_loss": 0.3728504478931427, "eval_loss": 11.931214332580566, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
3
+ {"phase": "epoch", "epoch": 3, "global_step": 51, "train_avg_loss": 0.3728504478931427, "eval_loss": 11.931214332580566, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
4
+ {"phase": "epoch", "epoch": 4, "global_step": 68, "train_avg_loss": 0.3728504478931427, "eval_loss": 11.931214332580566, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
5
+ {"phase": "epoch", "epoch": 5, "global_step": 85, "train_avg_loss": 0.3728504478931427, "eval_loss": 11.931214332580566, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
6
+ {"phase": "epoch", "epoch": 6, "global_step": 102, "train_avg_loss": 0.3728504478931427, "eval_loss": 11.931214332580566, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
7
+ {"phase": "epoch", "epoch": 7, "global_step": 119, "train_avg_loss": 0.3728504478931427, "eval_loss": 11.931214332580566, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
8
+ {"phase": "epoch", "epoch": 1, "global_step": 0, "train_avg_loss": 11.931214332580566, "eval_loss": 11.931214332580566, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
9
+ {"phase": "final", "epoch": 1, "global_step": 0, "train_avg_loss": 11.931214332580566, "eval_loss": 11.931214332580566, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
10
+ {"phase": "epoch", "epoch": 1, "global_step": 2, "train_avg_loss": 11.931214332580566, "eval_loss": 11.931214332580566, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
11
+ {"phase": "epoch", "epoch": 2, "global_step": 4, "train_avg_loss": 11.931214332580566, "eval_loss": 11.931214332580566, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
12
+ {"phase": "epoch", "epoch": 3, "global_step": 6, "train_avg_loss": 11.931214332580566, "eval_loss": 11.931214332580566, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
13
+ {"phase": "final", "epoch": 3, "global_step": 6, "train_avg_loss": 11.931214332580566, "eval_loss": 11.931214332580566, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
14
+ {"phase": "epoch", "epoch": 1, "global_step": 2, "train_avg_loss": 18.321178436279297, "eval_loss": 21.45726776123047, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
15
+ {"phase": "epoch", "epoch": 2, "global_step": 4, "train_avg_loss": 16.057231903076172, "eval_loss": 20.39411163330078, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
16
+ {"phase": "epoch", "epoch": 3, "global_step": 6, "train_avg_loss": 15.725045204162598, "eval_loss": 20.471101760864258, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
17
+ {"phase": "final", "epoch": 3, "global_step": 6, "train_avg_loss": 15.725045204162598, "eval_loss": 20.471101760864258, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
18
+ {"phase": "epoch", "epoch": 1, "global_step": 2, "train_avg_loss": 18.435989379882812, "eval_loss": 21.166339874267578, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
19
+ {"phase": "epoch", "epoch": 2, "global_step": 4, "train_avg_loss": 15.885025024414062, "eval_loss": 20.61844825744629, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
20
+ {"phase": "epoch", "epoch": 3, "global_step": 6, "train_avg_loss": 16.04737663269043, "eval_loss": 20.586658477783203, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
21
+ {"phase": "final", "epoch": 3, "global_step": 6, "train_avg_loss": 16.04737663269043, "eval_loss": 20.586658477783203, "em_acc_pct": 0.0, "tok_acc_pct": 0.0}
22
+ {"phase": "epoch", "epoch": 1, "global_step": 15, "train_avg_loss": 3.3097920417785645, "eval_loss": 3.314453601837158, "em_acc_pct": 0.0, "tok_acc_pct": 37.87878751754761}
23
+ {"phase": "epoch", "epoch": 2, "global_step": 30, "train_avg_loss": 3.364327907562256, "eval_loss": 3.375408172607422, "em_acc_pct": 0.0, "tok_acc_pct": 37.12121248245239}
24
+ {"phase": "epoch", "epoch": 3, "global_step": 45, "train_avg_loss": 3.3895301818847656, "eval_loss": 3.3486533164978027, "em_acc_pct": 0.0, "tok_acc_pct": 37.5}
25
+ {"phase": "final", "epoch": 3, "global_step": 45, "train_avg_loss": 3.3895301818847656, "eval_loss": 3.3486533164978027, "em_acc_pct": 0.0, "tok_acc_pct": 37.5}
26
+ {"phase": "epoch", "epoch": 1, "global_step": 2, "train_avg_loss": 3.844731330871582, "eval_loss": 3.2611234188079834, "em_acc_pct": 0.0, "tok_acc_pct": 41.66666567325592}
27
+ {"phase": "final", "epoch": 1, "global_step": 2, "train_avg_loss": 3.844731330871582, "eval_loss": 3.2611234188079834, "em_acc_pct": 0.0, "tok_acc_pct": 41.66666567325592}
28
+ {"phase": "epoch", "epoch": 1, "global_step": 17, "train_avg_loss": 9.678044319152832, "eval_loss": 5.9935407638549805, "em_acc_pct": 19.84127014875412, "tok_acc_pct": 19.84127014875412}
29
+ {"phase": "epoch", "epoch": 2, "global_step": 34, "train_avg_loss": 5.083719730377197, "eval_loss": 4.875655174255371, "em_acc_pct": 18.25396865606308, "tok_acc_pct": 18.25396865606308}
30
+ {"phase": "epoch", "epoch": 3, "global_step": 51, "train_avg_loss": 4.628010272979736, "eval_loss": 4.714605808258057, "em_acc_pct": 21.82539701461792, "tok_acc_pct": 21.82539701461792}
31
+ {"phase": "final", "epoch": 3, "global_step": 51, "train_avg_loss": 4.628010272979736, "eval_loss": 4.714605808258057, "em_acc_pct": 21.82539701461792, "tok_acc_pct": 21.82539701461792}
outputs/qwen_titans_babilong_detach_4gpu_bs1_ckpt/eval_metrics.jsonl ADDED
@@ -0,0 +1 @@
 
 
1
+ {"phase": "epoch", "epoch": 1, "global_step": 35, "train_avg_loss": 4.500145435333252, "eval_loss": 1.749605655670166, "em_acc_pct": 30.158731341362, "tok_acc_pct": 30.158731341362}
outputs/qwen_titans_babilong_v3/eval_metrics.jsonl ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {"phase": "epoch", "epoch": 1, "global_step": 7, "train_avg_loss": 10.893842697143555, "eval_loss": 5.053490161895752, "em_acc_pct": 21.153846383094788, "tok_acc_pct": 21.153846383094788}
2
+ {"phase": "epoch", "epoch": 1, "global_step": 7, "train_avg_loss": 10.93690013885498, "eval_loss": 5.081326484680176, "em_acc_pct": 23.076923191547394, "tok_acc_pct": 23.076923191547394}
3
+ {"phase": "epoch", "epoch": 2, "global_step": 14, "train_avg_loss": 3.1573493480682373, "eval_loss": 2.4568819999694824, "em_acc_pct": 28.846153616905212, "tok_acc_pct": 28.846153616905212}
4
+ {"phase": "epoch", "epoch": 3, "global_step": 21, "train_avg_loss": 1.8114500045776367, "eval_loss": 2.129908561706543, "em_acc_pct": 32.692307233810425, "tok_acc_pct": 32.692307233810425}
5
+ {"phase": "epoch", "epoch": 4, "global_step": 28, "train_avg_loss": 1.5927897691726685, "eval_loss": 2.193814516067505, "em_acc_pct": 26.923078298568726, "tok_acc_pct": 26.923078298568726}
pyproject.toml ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "titans-pytorch"
3
+ version = "0.5.0"
4
+ description = "Titans"
5
+ authors = [
6
+ { name = "Phil Wang", email = "lucidrains@gmail.com" }
7
+ ]
8
+ readme = "README.md"
9
+ requires-python = ">= 3.9"
10
+ license = { file = "LICENSE" }
11
+ keywords = [
12
+ 'artificial intelligence',
13
+ 'deep learning',
14
+ 'test time training',
15
+ 'linear attention',
16
+ 'memory',
17
+ ]
18
+
19
+ classifiers=[
20
+ 'Development Status :: 4 - Beta',
21
+ 'Intended Audience :: Developers',
22
+ 'Topic :: Scientific/Engineering :: Artificial Intelligence',
23
+ 'License :: OSI Approved :: MIT License',
24
+ 'Programming Language :: Python :: 3.9',
25
+ ]
26
+
27
+ dependencies = [
28
+ "assoc-scan>=0.0.4",
29
+ "axial_positional_embedding>=0.3.10",
30
+ "einops>=0.8.0",
31
+ "einx>=0.3.0",
32
+ "hyper-connections>=0.3.11",
33
+ "Ninja",
34
+ "rotary-embedding-torch",
35
+ "tensordict",
36
+ "torch>=2.8",
37
+ "tqdm",
38
+ "x-transformers"
39
+ ]
40
+
41
+ [project.urls]
42
+ Homepage = "https://pypi.org/project/titans-pytorch/"
43
+ Repository = "https://github.com/lucidrains/titans-pytorch"
44
+
45
+ [project.optional-dependencies]
46
+
47
+ examples = [
48
+ "adam-atan2-pytorch>=0.1.18",
49
+ "wandb"
50
+ ]
51
+
52
+ test = [
53
+ "pytest"
54
+ ]
55
+
56
+ [tool.pytest.ini_options]
57
+ pythonpath = [
58
+ "."
59
+ ]
60
+
61
+ [build-system]
62
+ requires = ["hatchling"]
63
+ build-backend = "hatchling.build"
64
+
65
+ [tool.rye]
66
+ managed = true
67
+ dev-dependencies = []
68
+
69
+ [tool.hatch.metadata]
70
+ allow-direct-references = true
71
+
72
+ [tool.hatch.build.targets.wheel]
73
+ packages = ["titans_pytorch"]
tests/test_titans.py ADDED
@@ -0,0 +1,427 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from contextlib import contextmanager
2
+
3
+ import torch
4
+ from torch import nn
5
+
6
+ import pytest
7
+ from titans_pytorch import NeuralMemory
8
+ from titans_pytorch.mac_transformer import flex_attention, SegmentedAttention, MemoryAsContextTransformer
9
+
10
+ # functions
11
+
12
def exists(v):
    """True when *v* is a real value (i.e. not None)."""
    return not (v is None)
14
+
15
def diff(x, y):
    """Largest absolute elementwise difference between two tensors."""
    delta = x - y
    return delta.abs().amax()
17
+
18
@contextmanager
def torch_default_dtype(dtype):
    """Temporarily set torch's global default dtype, restoring it on exit.

    The restore now sits in a ``finally`` clause, so an exception raised
    inside the ``with`` block no longer leaks the temporary dtype into the
    rest of the test session (the original skipped the restore on error).
    """
    prev_dtype = torch.get_default_dtype()
    torch.set_default_dtype(dtype)
    try:
        yield
    finally:
        torch.set_default_dtype(prev_dtype)
24
+
25
+ # main test
26
+
27
@pytest.mark.parametrize('seq_len', (32, 512, 77))
@pytest.mark.parametrize('silu', (False, True))
@pytest.mark.parametrize('chunk_size, attn_pool_chunks', ((64, True), (64, False), (1, False)))
@pytest.mark.parametrize('momentum', (False, True))
@pytest.mark.parametrize('qk_rmsnorm', (False, True))
@pytest.mark.parametrize('heads', (1, 4))
@pytest.mark.parametrize('max_grad_norm', (None, 2.))
@pytest.mark.parametrize('num_kv_per_token', (1, 2))
@pytest.mark.parametrize('per_parameter_lr_modulation', (False, True))
@pytest.mark.parametrize('per_head_learned_parameters', (False, True))
@pytest.mark.parametrize('test_store_mask', (False, True))
def test_titans(
    seq_len,
    silu,
    attn_pool_chunks,
    chunk_size,
    momentum,
    qk_rmsnorm,
    heads,
    max_grad_norm,
    num_kv_per_token,
    per_parameter_lr_modulation,
    per_head_learned_parameters,
    test_store_mask
):
    """Smoke test: NeuralMemory preserves input shape across a wide sweep of
    configuration options (chunking, momentum, norms, heads, store masking)."""
    mem = NeuralMemory(
        dim = 16,
        chunk_size = chunk_size,
        activation = nn.SiLU() if silu else None,
        attn_pool_chunks = attn_pool_chunks,
        max_grad_norm = max_grad_norm,
        num_kv_per_token = num_kv_per_token,
        momentum = momentum,
        qk_rmsnorm = qk_rmsnorm,
        heads = heads,
        per_parameter_lr_modulation = per_parameter_lr_modulation,
        per_head_learned_parameters = per_head_learned_parameters
    )

    seq = torch.randn(2, seq_len, 16)

    store_mask = None

    if test_store_mask:
        # random boolean mask over positions to be stored into memory
        store_mask = torch.randint(0, 2, (2, seq_len)).bool()

    retrieved, _ = mem(seq, store_mask = store_mask)

    # retrieval must be shape-preserving regardless of configuration
    assert seq.shape == retrieved.shape
76
+
77
def test_return_surprises():
    """`return_surprises = True` yields (surprises, adaptive_lr), both shaped
    (batch, heads, seq_len)."""

    mem = NeuralMemory(
        dim = 384,
        chunk_size = 2,
        dim_head = 64,
        heads = 4,
    )

    seq = torch.randn(4, 64, 384)

    _, _, (surprises, adaptive_lr) = mem(seq, return_surprises = True)

    # (batch = 4, heads = 4, seq = 64)
    assert all([t.shape == (4, 4, 64) for t in (surprises, adaptive_lr)])
91
+
92
@pytest.mark.parametrize('learned_momentum_combine', (False, True))
@pytest.mark.parametrize('learned_combine_include_zeroth', (False, True))
def test_titans_second_order_momentum(
    learned_momentum_combine,
    learned_combine_include_zeroth
):
    """Shape-preservation smoke test for momentum_order = 2 with and without
    a learned combination of momentum orders."""

    mem = NeuralMemory(
        dim = 384,
        dim_head = 64,
        heads = 2,
        chunk_size = 1,
        batch_size = 2,
        momentum_order = 2,
        learned_momentum_combine = learned_momentum_combine,
        learned_combine_include_zeroth = learned_combine_include_zeroth
    )

    seq = torch.randn(2, 5, 384)

    parallel_retrieved, state = mem(seq)
    assert seq.shape == parallel_retrieved.shape
114
+
115
def test_titans_attn_memory():
    """NeuralMemory with an attention-based memory model (instead of the
    default MLP) still preserves the input shape."""
    from titans_pytorch.memory_models import MemoryAttention

    mem = NeuralMemory(
        dim = 16,
        chunk_size = 64,
        model = MemoryAttention(
            dim = 16
        )
    )

    seq = torch.randn(2, 1024, 16)
    retrieved, _ = mem(seq)

    assert seq.shape == retrieved.shape
130
+
131
def test_swiglu_ff_memory():
    """NeuralMemory with a SwiGLU MLP memory model; residual-add inside the
    memory model norm is disabled to match that model's expectations."""
    from titans_pytorch.memory_models import MemorySwiGluMLP

    mem = NeuralMemory(
        dim = 16,
        chunk_size = 2,
        mem_model_norm_add_residual = False,
        model = MemorySwiGluMLP(
            dim = 16,
            depth = 2
        )
    )

    seq = torch.randn(2, 64, 16)
    retrieved, _ = mem(seq)

    assert seq.shape == retrieved.shape
148
+
149
@pytest.mark.parametrize('gated_transition', (True, False))
def test_neural_mem_chaining_chunks(
    gated_transition
):
    """Processing a sequence in one parallel call must equal processing it in
    three chained calls that pass the returned state forward."""
    mem = NeuralMemory(
        dim = 16,
        dim_head = 16,
        heads = 2,
        chunk_size = 16,
        gated_transition = gated_transition
    )

    seq = torch.randn(2, 48, 16)

    parallel_retrieved, state = mem(seq)

    # split into three 16-token chunks and chain the state through
    seq_first, seq_second, seq_third = seq.split(16, dim = 1)

    first_retrieved, state = mem(seq_first)
    second_retrieved, state = mem(seq_second, state = state)
    third_retrieved, state = mem(seq_third, state = state)

    assert torch.allclose(parallel_retrieved, torch.cat((first_retrieved, second_retrieved, third_retrieved), dim = 1), atol = 1e-5)
172
+
173
def test_neural_mem_chaining_with_weight_residual():
    """Chained calls with `prev_weights` (weight residual from a preceding
    memory layer) must match the single parallel call."""
    mem = NeuralMemory(
        dim = 16,
        dim_head = 16,
        heads = 2,
        chunk_size = 64
    )

    # second memory accepts the first memory's updates as a weight residual
    mem2 = NeuralMemory(
        dim = 16,
        dim_head = 16,
        heads = 2,
        chunk_size = 64,
        accept_weight_residual = True
    )

    seq = torch.randn(2, 256, 16)

    seq, state = mem(seq)

    parallel_retrieved, _ = mem2(seq, prev_weights = state.updates)

    seq_first, seq_second = seq[:, :128], seq[:, 128:]

    first_retrieved, state1 = mem2(seq_first, prev_weights = state.updates)
    second_retrieved, state2 = mem2(seq_second, state = state1, prev_weights = state.updates)

    assert torch.allclose(parallel_retrieved, torch.cat((first_retrieved, second_retrieved), dim = 1), atol = 1e-5)
201
+
202
def test_neural_mem_chaining_with_batch_size():
    """Chaining with an internal update `batch_size` (64 tokens per weight
    update) must match the parallel call, even with unevenly-sized chunks."""
    mem = NeuralMemory(
        dim = 16,
        dim_head = 16,
        heads = 2,
        chunk_size = 16,
        batch_size = 64
    )

    seq = torch.randn(2, 112, 16)

    parallel_retrieved, state = mem(seq)

    # deliberately uneven splits: 16 / 48 / 48 tokens
    seq_first, seq_second, seq_third = seq[:, :16], seq[:, 16:64], seq[:, 64:]

    first_retrieved, state = mem(seq_first)
    second_retrieved, state = mem(seq_second, state = state)
    third_retrieved, state = mem(seq_third, state = state)

    parallel_part_retrieved = torch.cat((first_retrieved, second_retrieved, third_retrieved), dim = 1)

    assert torch.allclose(parallel_retrieved, parallel_part_retrieved, atol = 1e-5)
224
+
225
@pytest.mark.parametrize('seq_len', (1023, 17))
@pytest.mark.parametrize('num_persist_mem_tokens', (0, 16))
@pytest.mark.parametrize('num_longterm_mem_tokens', (0, 16))
@pytest.mark.parametrize('neural_mem_gate_attn_output', (False, True))
@pytest.mark.parametrize('neural_mem_segment_len', (8, 16))
@pytest.mark.parametrize('neural_mem_weight_residual', (False, True))
@pytest.mark.parametrize('neural_mem_batch_size', (None, 64))
@pytest.mark.parametrize('neural_mem_qkv_receives_diff_views', (False, True))
@pytest.mark.parametrize('neural_mem_momentum', (False, True))
def test_mac(
    seq_len,
    num_persist_mem_tokens,
    num_longterm_mem_tokens,
    neural_mem_gate_attn_output,
    neural_mem_segment_len,
    neural_mem_weight_residual,
    neural_mem_batch_size,
    neural_mem_qkv_receives_diff_views,
    neural_mem_momentum
):
    """MemoryAsContextTransformer produces per-token logits of the expected
    shape across segment/memory configurations, including lengths that are
    not a multiple of the segment length."""
    transformer = MemoryAsContextTransformer(
        num_tokens = 256,
        dim = 16,
        depth = 2,
        num_persist_mem_tokens = num_persist_mem_tokens,
        num_longterm_mem_tokens = num_longterm_mem_tokens,
        segment_len = 128,
        neural_mem_gate_attn_output = neural_mem_gate_attn_output,
        neural_memory_segment_len = neural_mem_segment_len,
        neural_memory_batch_size = neural_mem_batch_size,
        neural_memory_qkv_receives_diff_views = neural_mem_qkv_receives_diff_views,
        neural_mem_weight_residual = neural_mem_weight_residual,
        neural_memory_kwargs = dict(
            momentum = neural_mem_momentum
        )
    )

    x = torch.randint(0, 256, (1, seq_len))

    logits = transformer(x)
    assert logits.shape == (1, seq_len, 256)
266
+
267
@pytest.mark.parametrize('sliding', (False, True))
@pytest.mark.parametrize('mem_layers', ((), None))
@pytest.mark.parametrize('longterm_mems', (0, 4, 16))
@pytest.mark.parametrize('prompt_len', (4, 16))
@torch_default_dtype(torch.float64)
def test_mac_sampling(
    sliding,
    mem_layers,
    longterm_mems,
    prompt_len
):
    """Greedy sampling with and without the kv/memory cache must agree.
    Run in float64 so cached and uncached paths match bit-for-bit."""
    transformer = MemoryAsContextTransformer(
        num_tokens = 256,
        dim = 16,
        depth = 4,
        segment_len = 32,
        num_persist_mem_tokens = 4,
        num_longterm_mem_tokens = longterm_mems,
        sliding_window_attn = sliding,
        neural_memory_layers = mem_layers,
        neural_mem_gate_attn_output = False
    )

    ids = torch.randint(0, 256, (1, 1023))

    # after much training

    prompt = ids[:, :prompt_len]

    # temperature 0 -> deterministic (greedy) decoding for both paths
    sampled = transformer.sample(prompt, 53, use_cache = False, temperature = 0.)
    sampled_with_cache = transformer.sample(prompt, 53, use_cache = True, temperature = 0.)

    assert torch.allclose(sampled, sampled_with_cache)
300
+
301
@pytest.mark.parametrize('seq_len', (2, 64, 256))
@pytest.mark.parametrize('prompt_len', (0, 65))
@pytest.mark.parametrize('mem_chunk_size', (2, 32, 64))
@pytest.mark.parametrize('gated_transition', (False, True))
@torch_default_dtype(torch.float64)
def test_neural_mem_inference(
    seq_len,
    prompt_len,
    mem_chunk_size,
    gated_transition
):
    """Token-by-token (sequential) inference must reproduce the parallel
    forward pass, optionally after an initial parallel prompt segment.
    Float64 keeps the two paths numerically comparable."""

    mem = NeuralMemory(
        dim = 16,
        chunk_size = mem_chunk_size,
        gated_transition = gated_transition
    )

    seq = torch.randn(2, seq_len, 16)
    parallel_retrieved, _ = mem(seq)

    assert seq.shape == parallel_retrieved.shape

    state = None
    sequential_retrieved = []

    # test initial parallel prompt

    # only meaningful when the prompt is a proper non-empty prefix
    test_parallel_prompt = prompt_len > 0 and prompt_len < seq_len

    if test_parallel_prompt:
        prompt, seq = seq[:, :prompt_len], seq[:, prompt_len:]
        retrieved_prompt, state = mem(prompt)
        sequential_retrieved.append(retrieved_prompt)

    # sequential inference

    for token in seq.unbind(dim = 1):

        one_retrieved, state = mem.forward(
            token,
            state = state,
        )

        sequential_retrieved.append(one_retrieved)

    sequential_retrieved = torch.cat(sequential_retrieved, dim = -2)

    assert torch.allclose(parallel_retrieved, sequential_retrieved, atol = 1e-6)
350
+
351
@pytest.mark.parametrize('seq_len', (1023, 17))
@pytest.mark.parametrize('sliding', (True, False))
def test_flex(
    seq_len,
    sliding
):
    """Flex-attention and the fallback attention path must agree.
    Requires CUDA and a torch build that exposes flex_attention."""
    if not (torch.cuda.is_available() and exists(flex_attention)):
        pytest.skip()

    attn = SegmentedAttention(
        dim = 16,
        segment_len = 32,
        num_persist_mem_tokens = 1,
        num_longterm_mem_tokens = 1,
        use_flex_attn = True,
        sliding = sliding
    ).cuda()

    seq = torch.randn(1, seq_len, 16).cuda()

    out_flex, _ = attn(seq)
    out_non_flex, _ = attn(seq, disable_flex_attn = True)

    assert torch.allclose(out_flex, out_non_flex, atol = 1e-5)
375
+
376
@pytest.mark.parametrize('use_accelerated', (True, False))
def test_assoc_scan(
    use_accelerated
):
    """The associative scan over a split sequence, seeded with the first
    half's final state, must match the full-sequence scan at the last step."""
    from titans_pytorch.neural_memory import AssocScan

    if use_accelerated and not torch.cuda.is_available():
        pytest.skip()

    scan = AssocScan(use_accelerated = use_accelerated)

    seq_len = 128
    mid_point = seq_len // 2

    # gates in (0, 1) via sigmoid; inputs unconstrained
    gates = torch.randn(2, seq_len, 16).sigmoid()
    inputs = torch.randn(2, seq_len, 16)

    if use_accelerated:
        gates = gates.cuda()
        inputs = inputs.cuda()

    output = scan(gates, inputs)

    gates1, gates2 = gates[:, :mid_point], gates[:, mid_point:]
    inputs1, inputs2 = inputs[:, :mid_point], inputs[:, mid_point:]

    first_half = scan(gates1, inputs1)

    # seed the second half with the carry from the first half
    second_half = scan(gates2, inputs2, prev = first_half[:, -1])
    assert second_half.shape == inputs2.shape

    assert torch.allclose(output[:, -1], second_half[:, -1], atol = 1e-5)
408
+
409
def test_mem_state_detach():
    """Detaching the memory state between steps must leave a valid graph for
    backward on each step's output (no double-backward through the state)."""
    from titans_pytorch.neural_memory import mem_state_detach

    mem = NeuralMemory(
        dim = 384,
        chunk_size = 2,
        qk_rmsnorm = True,
        dim_head = 64,
        heads = 4,
    )

    seq = torch.randn(4, 64, 384)

    state = None

    for _ in range(2):
        parallel_retrieved, state = mem(seq, state = state)
        # cut the autograd graph at the state boundary, as done in BPTT-style training
        state = mem_state_detach(state)
        parallel_retrieved.sum().backward()
titans_pytorch/__init__.py CHANGED
@@ -1,17 +1,17 @@
1
- """
2
- Minimal `titans_pytorch` exports for `train_qwen_titans_babilong_v4.py`.
 
 
 
3
 
4
- This Hugging Face repo intentionally contains only the code paths used by the v4
5
- training script (NeuralMemory + MemoryMLP), and does NOT ship unrelated modules
6
- from the original project.
7
- """
 
 
 
8
 
9
- from titans_pytorch.neural_memory import NeuralMemState, NeuralMemory, mem_state_detach
10
- from titans_pytorch.memory_models import MemoryMLP
11
-
12
- __all__ = [
13
- "NeuralMemory",
14
- "NeuralMemState",
15
- "mem_state_detach",
16
- "MemoryMLP",
17
- ]
 
1
"""Public exports for the `titans_pytorch` package."""

from titans_pytorch.neural_memory import (
    NeuralMemory,
    NeuralMemState,
    mem_state_detach
)

from titans_pytorch.memory_models import (
    MemoryMLP,
    MemoryAttention,
    FactorizedMemoryMLP,
    MemorySwiGluMLP,
    GatedResidualMemoryMLP
)

from titans_pytorch.mac_transformer import (
    MemoryAsContextTransformer
)

# declare the public API explicitly (the previous revision had `__all__`;
# this keeps `from titans_pytorch import *` bounded to intended names)
__all__ = [
    "NeuralMemory",
    "NeuralMemState",
    "mem_state_detach",
    "MemoryMLP",
    "MemoryAttention",
    "FactorizedMemoryMLP",
    "MemorySwiGluMLP",
    "GatedResidualMemoryMLP",
    "MemoryAsContextTransformer",
]
+ )
 
 
 
 
 
 
titans_pytorch/implicit_mlp_attention.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import torch
4
+ from torch import nn, cat, is_tensor
5
+ import torch.nn.functional as F
6
+ from torch.nn import Module, ModuleList
7
+ from torch.utils._pytree import tree_map
8
+
9
+ from einops.layers.torch import Rearrange
10
+
11
+ from rotary_embedding_torch import RotaryEmbedding
12
+
13
+ # functions
14
+
15
def exists(v):
    """True when *v* is a real value (i.e. not None)."""
    return not (v is None)
17
+
18
+ # classes
19
+
20
class ImplicitMLPAttention(Module):
    """Attention in which the chained key/value projections form an implicit MLP.

    Each (keys, values) pair acts as an implicit weight matrix of shape
    (dim_key, dim_value); attending through them in sequence, with an
    activation in between, emulates the implicit MLP memory from TTT / Titans.
    """

    def __init__(
        self,
        dim,
        mlp_hiddens: tuple[int, ...],
        *,
        # NOTE(review): a module instance as a default argument is shared across
        # all ImplicitMLPAttention instances — benign for stateless SiLU, but
        # worth confirming if a stateful activation is ever passed
        activation = nn.SiLU(),
        heads = 8,
        talking_heads = True,
        prenorm = True,
        keys_rmsnorm = True # https://openreview.net/forum?id=HkztQWZfl2
    ):
        super().__init__()
        # need at least an input and an output width for the implicit MLP
        assert isinstance(mlp_hiddens, tuple) and len(mlp_hiddens) >= 2
        dim_mlp_in, *dim_mlp_inner, dim_mlp_out = mlp_hiddens

        self.norm = nn.RMSNorm(dim) if prenorm else nn.Identity()

        # queries live in the implicit MLP's input space, one copy per head
        dim_query_inner = dim_mlp_in * heads
        self.to_queries = nn.Linear(dim, dim_query_inner, bias = False)

        # keys and values

        self.rotary_embed = RotaryEmbedding(min(mlp_hiddens)) # just use the minimum dimension, the rest is partially rotaried

        # each key value forms an implicit weight (memory) of (dim_key, dim_values)
        # chaining them would then be the implicit MLP from TTT / Titans

        self.keys = ModuleList([])
        self.key_norms = ModuleList([])

        self.values = ModuleList([])

        # one (keys, values) projection pair per consecutive pair of MLP widths
        for dim_in, dim_out in zip(mlp_hiddens[:-1], mlp_hiddens[1:]):

            dim_keys_inner = dim_in * heads
            dim_values_inner = dim_out * heads

            keys = nn.Linear(dim, dim_keys_inner, bias = False)
            key_norms = nn.RMSNorm(dim_in) if keys_rmsnorm else nn.Identity()

            values = nn.Linear(dim, dim_values_inner, bias = False)

            self.keys.append(keys)
            self.key_norms.append(key_norms)

            self.values.append(values)

        self.activation = activation

        # talking head - Shazeer et al.

        self.talking_heads = nn.Identity()

        # only mix heads between layers when there is an inner (hidden) layer;
        # dirac init makes it start as an identity mixing
        if talking_heads and len(dim_mlp_inner) > 0:
            self.talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
            nn.init.dirac_(self.talking_heads.weight)

        # split merging of heads

        self.split_heads = Rearrange('b n (h d) -> b h n d', h = heads)
        self.merge_heads = Rearrange('b h n d -> b n (h d)')

        self.to_out = nn.Linear(dim_mlp_out * heads, dim, bias = False)

    def forward(
        self,
        tokens,
        cache = None,
        return_kv_cache = False
    ):
        """Run tokens (batch, seq, dim) through the implicit-MLP attention.

        Returns the output tensor, plus the per-layer (keys, values) cache
        when `return_kv_cache` is True.
        """
        # NOTE(review): batch / seq_len / device are unpacked but never used below
        batch, seq_len, device = *tokens.shape[:2], tokens.device

        tokens = self.norm(tokens)

        queries = self.to_queries(tokens)

        # per-layer projections of the same tokens
        keys = [fn(tokens) for fn in self.keys]
        values = [fn(tokens) for fn in self.values]

        # split heads for input as well as all keys, values that form the implicit weights

        queries, keys, values = tree_map(self.split_heads, (queries, keys, values))

        # maybe norm all keys

        keys = [norm(k) for norm, k in zip(self.key_norms, keys)]

        # cache

        if exists(cache):
            # prepend cached keys/values along the sequence dimension, per layer
            cache_keys, cache_values = cache

            keys = [cat(args, dim = -2) for args in zip(cache_keys, keys)]
            values = [cat(args, dim = -2) for args in zip(cache_values, values)]

        # attend

        def attend(q, k, v):
            # rotary positions applied with key offset so cached keys keep their positions
            q, k = self.rotary_embed.rotate_queries_with_cached_keys(q, k)

            # NOTE(review): `is_causal = True` with q shorter than k aligns the
            # causal diagonal per torch SDPA semantics — confirm this matches
            # the intended masking when a cache is supplied
            return F.scaled_dot_product_attention(q, k, v, is_causal = True)

        # implicit memory mlp

        out = queries

        for i, (key, value) in enumerate(zip(keys, values), start = 1):
            is_last = i == len(keys)

            out = attend(out, key, value)

            # inter-layer nonlinearity + head mixing, skipped after the final layer
            if not is_last:
                out = self.talking_heads(out)
                out = self.activation(out)

        # merge heads

        out = self.merge_heads(out)
        out = self.to_out(out)

        if not return_kv_cache:
            return out

        return out, (keys, values)
145
+
146
# 3 layers implicit MLP attention - 64 -> 128 -> 128 -> 64 w/ relu

if __name__ == '__main__':

    implicit_mlp_attn = ImplicitMLPAttention(
        512,
        (64, 128, 128, 64),
        activation = nn.ReLU()
    )

    tokens = torch.randn(1, 1024, 512)

    # fix: `forward` returns a single tensor unless `return_kv_cache = True`,
    # so the previous 2-tuple unpack `out, cache = implicit_mlp_attn(tokens)`
    # raised at runtime — request the cache explicitly on both calls
    out, cache = implicit_mlp_attn(tokens, return_kv_cache = True)
    out, cache = implicit_mlp_attn(tokens, cache = cache, return_kv_cache = True)

    assert out.shape == tokens.shape
titans_pytorch/mac_transformer.py ADDED
@@ -0,0 +1,921 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ from typing import Callable
3
+
4
+ from math import ceil
5
+ from copy import deepcopy
6
+ from functools import partial
7
+ from collections import namedtuple
8
+
9
+ import tqdm
10
+
11
+ import torch
12
+ from torch import nn, stack, cat
13
+ import torch.nn.functional as F
14
+ from torch.nn import Module, ModuleList, Linear
15
+
16
+ # flex attention
17
+ # https://pytorch.org/blog/flexattention/
18
+
19
+ flex_attention = None
20
+
21
+ try:
22
+ from torch.nn.attention.flex_attention import flex_attention, create_block_mask
23
+ if torch.cuda.is_available():
24
+ flex_attention = torch.compile(flex_attention)
25
+ except ImportError:
26
+ pass
27
+
28
def create_mac_block_mask(seq_len, window_size, persist_mem_len, sliding = False):
    """Build a flex-attention BlockMask for MAC (memory-as-context) attention.

    Keys are laid out as `persist_mem_len` persistent memory tokens followed by
    the sequence itself (KV_LEN = seq_len + persist_mem_len). Persistent memory
    is visible to every query; sequence keys are causally masked and further
    restricted either to the query's own `window_size` block (block diagonal)
    or, when `sliding`, to at most `window_size` positions back.
    """

    def create_mac_mask(_, __, q_idx, kv_idx):
        # kv indices below persist_mem_len are the always-visible persistent memory
        is_persist_mem = kv_idx < persist_mem_len
        # shift kv index so position 0 lines up with query position 0
        kv_without_mem = kv_idx - persist_mem_len
        causal_mask = q_idx >= kv_without_mem

        if not sliding:
            # queries only see keys inside the same window
            block_diagonal = (q_idx // window_size) == (kv_without_mem // window_size)
            causal_mask = causal_mask & block_diagonal
        else:
            # queries see up to window_size positions back, crossing window boundaries
            sliding_mask = (q_idx - kv_without_mem) <= window_size
            causal_mask = causal_mask & sliding_mask

        return is_persist_mem | (~is_persist_mem & causal_mask)

    block_mask = create_block_mask(create_mac_mask, B = None, H = None, Q_LEN = seq_len, KV_LEN = seq_len + persist_mem_len, _compile = True)
    return block_mask
46
+
47
+ # einstein notation related
48
+
49
+ from einops import repeat, rearrange, pack, unpack, einsum
50
+ from einops.layers.torch import Rearrange
51
+
52
+ # b - batch
53
+ # n - sequence
54
+ # h - heads
55
+ # d - feature dimension
56
+
57
+ # absolute and relative positions
58
+
59
+ from axial_positional_embedding import ContinuousAxialPositionalEmbedding
60
+ from rotary_embedding_torch import RotaryEmbedding
61
+
62
+ # hyper connections / attend from x-transformers, which handles different queries and key lengths better
63
+
64
+ from x_transformers.attend import Attend
65
+
66
+ from hyper_connections import mc_get_init_and_expand_reduce_stream_functions
67
+
68
+ # proposed neural memory
69
+
70
+ from titans_pytorch.neural_memory import NeuralMemory
71
+
72
+ # constants
73
+
74
# bias-less linear, used for all attention / output projections in this file
LinearNoBias = partial(Linear, bias = False)

# intermediates handed back by attention: the original values (for value
# residual learning in later layers) and the updated kv cache
AttnIntermediates = namedtuple('AttnIntermediates', ('value_residual', 'cached_key_values'))
77
+
78
+ # helpers
79
+
80
def exists(v):
    """Return True when `v` holds a value (i.e. is not None)."""
    return not (v is None)
82
+
83
def default(v, d):
    """Return `v` when it is not None, otherwise the fallback `d`."""
    if exists(v):
        return v
    return d
85
+
86
def identity(t):
    """No-op transform: hand back the argument unchanged."""
    return t
88
+
89
def divisible_by(num, den):
    """True when `num` is an exact multiple of `den`."""
    remainder = num % den
    return remainder == 0
91
+
92
def round_up_multiple(seq, mult):
    """Round `seq` up to the nearest multiple of `mult`.

    Uses pure integer ceil-division rather than the previous float-based
    `ceil(seq / mult)`, which loses precision (and can return a wrong
    multiple) for very large sequence lengths.
    """
    return -(-seq // mult) * mult
94
+
95
def round_down_multiple(seq, mult):
    """Round `seq` down to the nearest multiple of `mult`."""
    return (seq // mult) * mult
97
+
98
def pack_with_inverse(t, pattern):
    """einops `pack` that also returns a closure undoing the packing.

    The inverse accepts an optional `inv_pattern` for when the packed tensor
    was transformed to a different (but compatible) layout before unpacking.
    """
    packed, shapes = pack(t, pattern)

    def inverse(out, inv_pattern = None):
        unpack_pattern = default(inv_pattern, pattern)
        return unpack(out, shapes, unpack_pattern)

    return packed, inverse
105
+
106
def pad_at_dim(t, pad, dim = -1, value = 0.):
    """Pad tensor `t` by `pad = (before, after)` along dimension `dim`."""
    # F.pad consumes pairs from the last dim backwards, so prefix with
    # (0, 0) pairs for every dimension to the right of `dim`
    right_dims = (t.ndim - dim - 1) if dim >= 0 else (-dim - 1)
    prefix = (0, 0) * right_dims
    return F.pad(t, (*prefix, *pad), value = value)
110
+
111
def pad_and_segment_with_inverse(
    seq,
    segment_len,
    fold_into_batch = True,
    inverse_remove_pad = True
):
    """Right-pad `seq` (b, n, d) so its length is a multiple of `segment_len`,
    optionally folding the segments into the batch dimension.

    Returns the (padded, maybe folded) sequence and an inverse closure that
    unfolds and (by default) strips the padding again.
    """
    batch, seq_len = seq.shape[:2]
    target_len = round_up_multiple(seq_len, segment_len)

    pad_amount = target_len - seq_len
    was_padded = pad_amount > 0

    if was_padded:
        seq = F.pad(seq, (0, 0, 0, pad_amount))

    if fold_into_batch:
        seq = rearrange(seq, 'b (w n) d -> (b w) n d', n = segment_len)

    def inverse(out):
        if fold_into_batch:
            out = rearrange(out, '(b w) ... n d -> b ... (w n) d', b = batch)

        if was_padded and inverse_remove_pad:
            out = out[..., :-pad_amount, :]

        return out

    return seq, inverse
140
+
141
+ # sampling related
142
+
143
def log(t, eps = 1e-20):
    """Numerically safe log: inputs are clamped up to `eps` before the log."""
    clamped = t.clamp(min = eps)
    return torch.log(clamped)
145
+
146
def gumbel_noise(t):
    """Sample standard Gumbel noise shaped like `t`, via -log(-log(U))."""
    uniform = torch.rand_like(t)
    return log(log(uniform).neg()).neg()
149
+
150
def gumbel_sample(t, temperature = 1.):
    """Sample token indices from logits with the gumbel-max trick.

    A temperature of zero (or below) degenerates to greedy argmax.
    """
    use_sampling = temperature > 0.
    logits = (t / temperature + gumbel_noise(t)) if use_sampling else t
    return logits.argmax(dim = -1, keepdim = True)
154
+
155
+ # min_p
156
+ # https://arxiv.org/abs/2407.01082
157
+
158
def min_p_filter(logits, min_p = 0.1):
    """Min-p filtering (https://arxiv.org/abs/2407.01082).

    Masks (to -inf) every logit whose probability falls below
    `min_p` times the maximum probability in its distribution.
    """
    probs = F.softmax(logits, dim = -1)
    threshold = probs.amax(dim = -1, keepdim = True) * min_p
    masked = torch.full_like(logits, float('-inf'))
    return torch.where(probs >= threshold, logits, masked)
163
+
164
+ # feedforward and attention
165
+
166
class GEGLU(Module):
    """Gated linear unit activation.

    Despite the name, the gate uses SiLU rather than GELU, making this a
    SwiGLU in practice. Input is split in half on the last dim: the first
    half is the value, the second half is the gate.
    """

    def forward(self, x):
        value, gate = x.chunk(2, dim = -1)
        return value * F.silu(gate)
170
+
171
def FeedForward(dim, mult = 4):
    """Pre-norm gated feedforward block.

    The inner width is scaled by 2/3 so the gated (two-projection) variant
    keeps roughly the same parameter count as a plain `mult`-wide MLP.
    """
    hidden = int(dim * mult * 2 / 3)

    layers = [
        nn.RMSNorm(dim),
        nn.Linear(dim, hidden * 2),  # doubled: half value, half gate
        GEGLU(),
        nn.Linear(hidden, dim),
    ]
    return nn.Sequential(*layers)
180
+
181
class SegmentedAttention(Module):
    """Segment-wise (windowed) causal attention with persistent memory tokens.

    Each query attends only within its own window of
    `segment_len + num_longterm_mem_tokens` tokens (block diagonal), or also
    to the previous window when `sliding = True`. A set of
    `num_persist_mem_tokens` learned key/value pairs is prepended to every
    window. Optionally mixes values with a value residual from an earlier
    layer through a learned per-head sigmoid gate.
    """

    def __init__(
        self,
        dim,
        segment_len,
        num_persist_mem_tokens = 0,
        num_longterm_mem_tokens = 0,
        dim_head = 64,
        heads = 8,
        sliding = False,
        accept_value_residual = False,
        attend_kwargs: dict = dict(),
        use_flex_attn = False
    ):
        super().__init__()
        self.norm = nn.RMSNorm(dim)

        dim_inner = dim_head * heads

        self.rotary_emb = RotaryEmbedding(dim_head)

        self.attend = Attend(causal = True, **attend_kwargs)

        self.to_qkv = LinearNoBias(dim, dim_inner * 3)
        self.to_out = LinearNoBias(dim_inner, dim)

        # learned per-head gate in [0, 1] for mixing in the value residual

        self.to_learned_v_mix = nn.Sequential(
            nn.Linear(dim, heads),
            Rearrange('b n h -> b h n 1'),
            nn.Sigmoid()
        ) if accept_value_residual else None

        self.segment_len = segment_len
        self.num_longterm_mem_tokens = num_longterm_mem_tokens

        # an attention window spans one segment plus its longterm memory tokens

        total_segment_len = segment_len + num_longterm_mem_tokens
        self.total_segment_len = total_segment_len

        self.sliding = sliding # sliding window attn - doubt their non-sliding results being the best. local attention with overlapping windows is very strong

        self.split_heads = Rearrange('b n (h d) -> b h n d', h = heads)
        self.merge_heads = Rearrange('b h n d -> b n (h d)')

        # learned persistent memory; index 0 holds keys, index 1 holds values

        self.persistent_memory = nn.Parameter(torch.zeros(2, heads, num_persist_mem_tokens, dim_head))

        # flex attn related

        assert not (use_flex_attn and not exists(flex_attention)), 'you need to be on the latest pytorch with a cuda device available'
        self.use_flex_attn = use_flex_attn

        self.segment_len = segment_len # NOTE(review): redundant, already assigned above
        self.num_persist_mem_tokens = num_persist_mem_tokens

    def forward_inference(
        self,
        token,
        cache,
        value_residual = None,
        output_gating = None,
    ):
        """Single-token decoding step.

        `cache` is the running (keys, values) tuple; the new token's k/v are
        appended and returned in the intermediates as the next cache.
        """
        batch = token.shape[0]

        # attention

        token = self.norm(token)

        q, k, v = self.to_qkv(token).chunk(3, dim = -1)
        q, k, v = map(self.split_heads, (q, k, v))

        # value residual

        orig_v = v

        if exists(self.to_learned_v_mix):
            mix = self.to_learned_v_mix(token)
            v = v.lerp(value_residual, mix)

        # caching - append the current key / value onto the running cache

        ck, cv = cache
        k = cat((ck, k), dim = -2)
        v = cat((cv, v), dim = -2)

        next_cache = (k, v)

        # relative positions

        q, k = self.rotary_emb.rotate_queries_with_cached_keys(q, k)

        # fold (identity rearrange - kept for symmetry with the training path)

        q, k, v = tuple(rearrange(t, 'b h n d -> b h n d') for t in (q, k, v))

        # take care of persistent memory key / values

        pmk, pmv = repeat(self.persistent_memory, 'kv ... -> kv b ...', b = k.shape[0])

        # persistent memory

        k = cat((pmk, k), dim = -2)
        v = cat((pmv, v), dim = -2)

        # attention

        out, _ = self.attend(q, k, v)

        out = self.merge_heads(out)

        out = self.to_out(out)

        if exists(output_gating):
            out = out * output_gating

        return out, AttnIntermediates(orig_v, next_cache)

    def forward_flex(
        self,
        seq,
        value_residual = None,
        flex_attn_fn: Callable | None = None,
        output_gating = None,
        cache = None
    ):
        """Full-sequence forward using pytorch flex attention with a MAC block
        mask. NOTE(review): the `cache` argument is accepted but unused here.
        """

        # value residual must be provided iff the layer was built to accept one
        assert not (exists(value_residual) ^ exists(self.to_learned_v_mix))

        batch, seq_len = seq.shape[:2]

        # attention

        seq = self.norm(seq)

        q, k, v = self.to_qkv(seq).chunk(3, dim = -1)
        q, k, v = map(self.split_heads, (q, k, v))

        # value residual

        orig_v = v

        if exists(self.to_learned_v_mix):
            mix = self.to_learned_v_mix(seq)
            v = v.lerp(value_residual, mix)

        # caching

        next_cache = (k, v)

        # take care of persistent memory key / values

        pmk, pmv = repeat(self.persistent_memory, 'kv h n d -> kv b h n d', b = batch)

        # relative positions

        q, k = self.rotary_emb.rotate_queries_with_cached_keys(q, k)

        # persistent memory

        k = cat((pmk, k), dim = -2)
        v = cat((pmv, v), dim = -2)

        # prep flex attention

        if not exists(flex_attn_fn):
            block_mask = create_mac_block_mask(seq_len, self.total_segment_len, self.num_persist_mem_tokens, self.sliding)

            flex_attn_fn = partial(flex_attention, block_mask = block_mask)

        # attention

        out = flex_attn_fn(q, k, v)

        out = self.merge_heads(out)

        out = self.to_out(out)

        if exists(output_gating):
            out = out * output_gating

        return out, AttnIntermediates(orig_v, next_cache)

    def forward(
        self,
        seq,
        value_residual = None,
        flex_attn_fn: Callable | None = None,
        disable_flex_attn = False,
        output_gating = None,
        cache = None
    ):
        """Dispatch to single-token inference (when `cache` is given), flex
        attention (on cuda when enabled), or the manual segmented path which
        pads to a multiple of the window size and folds windows into batch.
        """
        is_inferencing = exists(cache)

        if is_inferencing:
            assert seq.shape[-2] == 1
            return self.forward_inference(seq, cache, value_residual, output_gating = output_gating)

        if seq.is_cuda and self.use_flex_attn and not disable_flex_attn:
            return self.forward_flex(seq, value_residual, flex_attn_fn, output_gating = output_gating, cache = cache)

        # value residual must be provided iff the layer was built to accept one

        assert not (exists(value_residual) ^ exists(self.to_learned_v_mix))

        segment_len, num_longterm_mem_tokens = self.segment_len, self.num_longterm_mem_tokens
        total_segment_len = segment_len + num_longterm_mem_tokens

        batch, seq_len = seq.shape[:2]

        # auto pad to multiple

        seq, inverse_segment = pad_and_segment_with_inverse(seq, total_segment_len, fold_into_batch = False)

        # attention

        seq = self.norm(seq)

        q, k, v = self.to_qkv(seq).chunk(3, dim = -1)
        q, k, v = map(self.split_heads, (q, k, v))

        # value residual

        orig_v = v

        if exists(self.to_learned_v_mix):
            mix = self.to_learned_v_mix(seq)
            v = v.lerp(value_residual, mix)

        # caching (unsegmented k / v, with padding stripped)

        next_cache = tuple(map(inverse_segment, (k, v)))

        # relative positions

        q, k = self.rotary_emb.rotate_queries_with_cached_keys(q, k)

        # fold windows into the batch dimension

        q, k, v = tuple(rearrange(t, 'b h (w n) d -> (b w) h n d', n = total_segment_len) for t in (q, k, v))

        # maybe sliding for cpu

        attend_kwargs = dict()

        if self.sliding:
            # every window also gets the previous window's keys / values,
            # with a mask limiting lookback to total_segment_len positions

            k, v = tuple(rearrange(t, '(b w) ... -> b w ...', b = batch) for t in (k, v))
            k, v = tuple(pad_at_dim(t, (1, 0), value = 0., dim = 1) for t in (k, v))
            k = cat((k[:, :-1], k[:, 1:]), dim = -2)
            v = cat((v[:, :-1], v[:, 1:]), dim = -2)
            k, v = tuple(rearrange(t, 'b w ... -> (b w) ...') for t in (k, v))

            # take care of masking

            idx = torch.arange(seq.shape[-2], device = seq.device)
            q_idx = rearrange(idx, '(w n) -> w n', n = total_segment_len)
            # -1e4 sentinel marks the padded (non-existent) window before the first
            k_idx = pad_at_dim(q_idx, (1, 0), dim = 0, value = -1e4)
            k_idx = cat((k_idx[:-1], k_idx[1:]), dim = -1)

            q_idx = rearrange(q_idx, 'w i -> w i 1')
            k_idx = rearrange(k_idx, 'w j -> w 1 j')

            sliding_mask = (q_idx - k_idx) <= total_segment_len
            # persistent memory positions are always attendable
            sliding_mask = F.pad(sliding_mask, (self.num_persist_mem_tokens, 0), value = True)

            sliding_mask = repeat(sliding_mask, 'w i j -> (b w) 1 i j', b = batch)
            attend_kwargs.update(mask = sliding_mask)

        # take care of persistent memory key / values

        pmk, pmv = repeat(self.persistent_memory, 'kv ... -> kv b ...', b = k.shape[0])

        # persistent memory

        k = cat((pmk, k), dim = -2)
        v = cat((pmv, v), dim = -2)

        # attention

        out, _ = self.attend(q, k, v, **attend_kwargs)

        out = self.merge_heads(out)

        out = self.to_out(out)

        out = rearrange(out, '(b w) n d -> b (w n) d', b = batch)

        out = inverse_segment(out)

        if exists(output_gating):
            out = out * output_gating

        return out, AttnIntermediates(orig_v, next_cache)
469
+
470
+ # MAC transformer
471
+
472
class MemoryAsContextTransformer(Module):
    """Memory-as-Context (MAC) transformer.

    Combines per-layer segmented local attention with an optional neural
    memory branch (titans `NeuralMemory`), interspersing learned long-term
    memory tokens between segments and prepending persistent memory key /
    values in attention. Residual streams are managed by hyper connections.
    """

    def __init__(
        self,
        *,
        num_tokens,
        dim,
        depth,
        segment_len,
        neural_memory_segment_len = None,
        neural_mem_gate_attn_output = False,
        neural_memory_add_value_residual = False,
        num_longterm_mem_tokens = 0,
        num_persist_mem_tokens = 0,
        neural_memory_batch_size = None,
        neural_memory_qkv_receives_diff_views = False,
        dim_head = 64,
        heads = 8,
        ff_mult = 4,
        num_residual_streams = 4,
        neural_memory_model: Module | None = None,
        neural_memory_kwargs: dict = dict(),
        neural_memory_layers: tuple[int, ...] | None = None,
        use_flex_attn = False,
        sliding_window_attn = False,
        neural_mem_weight_residual = False,
        token_emb: Module | None = None,
    ):
        super().__init__()

        if not exists(token_emb):
            token_emb = nn.Embedding(num_tokens, dim)

        self.token_emb = token_emb

        # absolute positions

        self.axial_pos_emb = ContinuousAxialPositionalEmbedding(dim = dim, num_axial_dims = 2)

        # long term mem tokens

        self.segment_len = segment_len

        self.num_longterm_mem_tokens = num_longterm_mem_tokens
        has_longterm_mems = num_longterm_mem_tokens > 0  # NOTE(review): computed but unused

        self.longterm_mems = nn.Parameter(torch.randn(num_longterm_mem_tokens, dim) * 0.02)

        # maybe sliding window attn

        self.sliding_window_attn = sliding_window_attn
        self.attn_window_size = segment_len + num_longterm_mem_tokens

        # hyper connection

        init_hyper_conn, self.expand_streams, self.reduce_streams = mc_get_init_and_expand_reduce_stream_functions(num_residual_streams, dim = dim, add_stream_embed = True, disable = num_residual_streams == 1)

        self.layers = ModuleList([])

        self.neural_memory_segment_len = default(neural_memory_segment_len, num_longterm_mem_tokens + segment_len)

        # layer indices are 1-based

        layers = tuple(range(1, depth + 1))

        neural_memory_layers = default(neural_memory_layers, layers)

        # weight residual related

        self.neural_mem_weight_residual = neural_mem_weight_residual
        is_first_neural_mem = True

        # mem, attn, and feedforward layers

        for layer in layers:
            is_first = layer == 1

            # attention and feedforward

            attn = SegmentedAttention(
                dim = dim,
                dim_head = dim_head,
                heads = heads,
                segment_len = segment_len,
                use_flex_attn = use_flex_attn,
                accept_value_residual = not is_first,
                num_longterm_mem_tokens = num_longterm_mem_tokens,
                num_persist_mem_tokens = num_persist_mem_tokens,
                sliding = sliding_window_attn
            )

            mem = None
            mem_qkv_layer_selector = None
            mem_hyper_conn = None

            if layer in neural_memory_layers:
                mem_hyper_conn = init_hyper_conn(add_branch_out_to_residual = not neural_mem_gate_attn_output)

                if not is_first and neural_memory_qkv_receives_diff_views:
                    num_layer_choices = (layer - 1) * 4 + 1 # for each layer, have memory input select from attn inp, attn out, ff inp, and ff out - plus one for the current point in the residual stream (memory input)

                    mem_qkv_layer_selector = nn.Sequential(
                        nn.RMSNorm(dim),
                        nn.Linear(dim, 3 * num_layer_choices),
                        Rearrange('... (views layers) -> views ... layers', views = 3),
                        nn.Softmax(dim = -1)
                    )

                mem = NeuralMemory(
                    dim = dim,
                    chunk_size = self.neural_memory_segment_len,
                    batch_size = neural_memory_batch_size,
                    model = deepcopy(neural_memory_model),
                    qkv_receives_diff_views = True,
                    accept_weight_residual = neural_mem_weight_residual and not is_first_neural_mem,
                    **neural_memory_kwargs
                )

                is_first_neural_mem = False

            ff = FeedForward(dim = dim, mult = ff_mult)

            self.layers.append(ModuleList([
                mem_hyper_conn,
                init_hyper_conn(),
                init_hyper_conn(),
                mem_qkv_layer_selector,
                mem,
                attn,
                ff,
            ]))

        self.norm = nn.RMSNorm(dim)

        self.to_logits = LinearNoBias(dim, num_tokens)

        # whether to gate the attention output with the retrieved memories

        self.gate_attn_output = neural_mem_gate_attn_output

        # zero for maybe aux loss + device

        self.register_buffer('zero', torch.tensor(0.), persistent = False)

        # flex attn related

        assert not (use_flex_attn and not exists(flex_attention)), 'you need to be on the latest pytorch with a cuda device available'
        self.use_flex_attn = use_flex_attn

        self.num_persist_mem_tokens = num_persist_mem_tokens

    def seq_index_is_longterm(
        self,
        seq_index
    ):
        """Whether `seq_index` (in the memory-interspersed sequence) falls on a
        long-term memory token rather than a real input token."""
        total_segment_len, segment_len = self.attn_window_size, self.segment_len
        return ((seq_index % total_segment_len + 1) - segment_len) > 0

    def seq_len_with_longterm_mem(
        self,
        seq_len
    ):
        """Sequence length after interspersing `num_longterm_mem_tokens` memory
        tokens per completed segment."""
        assert seq_len > 0

        segment_len, num_mem = self.segment_len, self.num_longterm_mem_tokens
        return ((seq_len - 1) // segment_len) * num_mem + seq_len

    @torch.no_grad()
    def sample(
        self,
        prompt: torch.Tensor,
        seq_len: int,
        temperature = 1.5,
        filter_fn: Callable = min_p_filter,
        filter_kwargs: dict = dict(
            min_p = 0.1,
        ),
        show_progress = True,
        use_cache = False
    ):
        """Autoregressively sample until the output reaches `seq_len` tokens,
        returning only the newly generated tokens (prompt excluded)."""
        was_training = self.training
        self.eval()

        prompt_seq_len, out = prompt.shape[-1], prompt.clone()
        sample_num_times = max(0, seq_len - prompt_seq_len)

        # cache for axial pos, attention, and neural memory

        cache = None
        factorized_pos_emb = None

        # precompute factorized pos emb

        if use_cache:
            seq_len_with_mem = self.seq_len_with_longterm_mem(seq_len)

            axial_dims = self.axial_pos_emb.maybe_derive_outer_dim(seq_len_with_mem, (self.neural_memory_segment_len,))

            factorized_pos_emb = self.axial_pos_emb(axial_dims, return_factorized = True)

        # sample

        with tqdm.tqdm(total = sample_num_times, disable = not show_progress) as pbar:

            while out.shape[-1] < seq_len:

                logits, next_cache = self.forward(
                    out,
                    disable_flex_attn = True,
                    cache = cache,
                    return_cache = True,
                    factorized_pos_emb = factorized_pos_emb
                )

                if use_cache:
                    cache = next_cache

                # logits are None when the step consumed a long-term memory token

                if not exists(logits):
                    continue

                logits = logits[:, -1]

                logits = filter_fn(logits, **filter_kwargs)
                sample = gumbel_sample(logits, temperature = temperature)

                out = torch.cat((out, sample), dim = -1)
                pbar.update(1)

        self.train(was_training)

        return out[..., prompt_seq_len:]

    def forward(
        self,
        x,
        return_loss = False,
        return_loss_breakdown = False,
        disable_flex_attn = False,
        cache = None,
        return_cache = False,
        factorized_pos_emb = None
    ):
        """Forward pass over token ids `x` (b, n).

        With `return_loss`, treats `x` as input+target and returns the
        cross-entropy loss; otherwise returns logits (and the cache when
        `return_cache`). Passing `cache` switches to one-token-at-a-time
        inference. NOTE(review): `return_loss_breakdown` is accepted but
        unused in this implementation.
        """

        if return_loss:
            x, labels = x[:, :-1], x[:, 1:]

        # math

        batch, seq_len, neural_mem_segment_len, segment_len, num_longterm_mem_tokens, attn_window_size = *x.shape, self.neural_memory_segment_len, self.segment_len, self.num_longterm_mem_tokens, self.attn_window_size

        seq_len_with_mem = self.seq_len_with_longterm_mem(seq_len)

        # token embedding

        x = self.token_emb(x)

        # intersperse longterm memory

        x, inverse_segment = pad_and_segment_with_inverse(x, segment_len, inverse_remove_pad = False)

        mems = repeat(self.longterm_mems, 'n d -> b n d', b = x.shape[0])
        x, inverse_pack_mems = pack_with_inverse((x, mems), 'b * d')

        x = inverse_segment(x)

        # splice out unneeded tokens from padding for longterm mems

        x = x[:, :seq_len_with_mem]

        # apply axial positional embedding
        # so intra and inter segment can be more easily discerned by the network

        pos_emb = self.axial_pos_emb.forward_with_seq_len(seq_len_with_mem, (neural_mem_segment_len,), factorized = factorized_pos_emb)

        x = x + pos_emb

        # prep flex attention

        use_flex_attn = x.is_cuda and self.use_flex_attn and not disable_flex_attn

        flex_attn_fn = None

        if use_flex_attn:
            block_mask = create_mac_block_mask(seq_len_with_mem, self.attn_window_size, self.num_persist_mem_tokens, self.sliding_window_attn)
            flex_attn_fn = partial(flex_attention, block_mask = block_mask)

        # kv caching

        is_inferencing = exists(cache)

        if not exists(cache):
            cache = (seq_len_with_mem - 1, None, None)

        inference_seq_index, kv_caches, neural_mem_caches = cache

        kv_caches = iter(default(kv_caches, []))
        neural_mem_caches = iter(default(neural_mem_caches, []))

        next_kv_caches = []
        next_neural_mem_caches = []

        # value residual

        value_residual = None

        # neural mem weight residual

        mem_weight_residual = None

        # layers for the neural mem to select the qkv inputs from

        mem_input_layers = []

        # when inferencing, only do one token at a time

        if is_inferencing:
            ind = inference_seq_index
            x = x[:, ind:(ind + 1)]

        # expand and reduce streams for hyper connections

        x = self.expand_streams(x)

        for mem_hyper_conn, attn_hyper_conn, ff_hyper_conn, mem_qkv_layer_selector, mem, attn, ff in self.layers:

            retrieved = None
            attn_out_gates = None
            next_neural_mem_cache = None

            # maybe neural memory

            if exists(mem):

                mem_input, add_residual = mem_hyper_conn(x)

                if not exists(mem_qkv_layer_selector):
                    # same branch input for queries, keys and values
                    qkv_mem_input = stack((mem_input, mem_input, mem_input))
                else:
                    layers_to_choose_from = stack((mem_input, *mem_input_layers))

                    # let the current `mem_input` select the 3 layers for qkv

                    selected = mem_qkv_layer_selector(mem_input)

                    qkv_mem_input = einsum(layers_to_choose_from, selected, 'l b n d, v b n l -> v b n d')

                retrieved, next_neural_mem_cache = mem.forward(
                    qkv_mem_input,
                    state = next(neural_mem_caches, None),
                    prev_weights = mem_weight_residual
                )

                if self.neural_mem_weight_residual:
                    mem_weight_residual = next_neural_mem_cache.updates

                if self.gate_attn_output:
                    attn_out_gates = retrieved.sigmoid()
                else:
                    x = add_residual(retrieved)

            # attention

            attn_in, add_residual = attn_hyper_conn(x)

            mem_input_layers.append(attn_in)

            attn_out, (values, next_kv_cache) = attn(
                attn_in,
                value_residual = value_residual,
                disable_flex_attn = disable_flex_attn,
                flex_attn_fn = flex_attn_fn,
                output_gating = attn_out_gates,
                cache = next(kv_caches, None)
            )

            mem_input_layers.append(attn_out)

            # first layer's values become the residual for all later layers

            value_residual = default(value_residual, values)

            x = add_residual(attn_out)

            # caches

            next_kv_caches.append(next_kv_cache)
            next_neural_mem_caches.append(next_neural_mem_cache)

            # feedforward

            ff_in, add_ff_residual = ff_hyper_conn(x)

            mem_input_layers.append(ff_in)

            ff_out = ff(ff_in)

            mem_input_layers.append(ff_out)

            x = add_ff_residual(ff_out)

        # taking care of cache first
        # for early return when processing long term mem tokens during inference

        if return_cache:
            next_kv_caches = stack([stack(kv_cache) for kv_cache in next_kv_caches])

            # handle kv cache length depending on local attention type

            next_kv_caches = next_kv_caches[..., -attn_window_size:, :]

            kv_cache_length = next_kv_caches.shape[-2]

            # non-sliding windows reset the cache at every window boundary

            if not self.sliding_window_attn and divisible_by(kv_cache_length, attn_window_size):
                next_kv_caches = next_kv_caches[..., 0:0, :]

            next_cache = (
                inference_seq_index + 1,
                next_kv_caches,
                next_neural_mem_caches
            )

            is_longterm_mem = self.seq_index_is_longterm(inference_seq_index)

            # long-term memory positions yield no logits during inference

            if is_inferencing and is_longterm_mem:
                return None, next_cache

        # hyper connection reducing of streams

        x = self.reduce_streams(x)

        # excise out the memories

        if not is_inferencing:

            x, inverse_segment = pad_and_segment_with_inverse(x, attn_window_size, inverse_remove_pad = False)

            x, _ = inverse_pack_mems(x)

            x = inverse_segment(x)

            x = x[:, :seq_len]

        # to logits

        x = self.norm(x)

        logits = self.to_logits(x)

        if not return_loss:
            if not return_cache:
                return logits

            return logits, next_cache

        return F.cross_entropy(rearrange(logits, 'b n l -> b l n'), labels)
titans_pytorch/nested_attention.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import torch
4
+ from torch import nn, cat, is_tensor
5
+ import torch.nn.functional as F
6
+ from torch.nn import Module, ModuleList
7
+ from torch.utils._pytree import tree_map
8
+
9
+ from einops.layers.torch import Rearrange
10
+
11
+ from rotary_embedding_torch import RotaryEmbedding
12
+
13
+ # functions
14
+
15
def exists(v):
    """Return True when `v` is not None."""
    if v is None:
        return False
    return True
17
+
18
+ # classes
19
+
20
class NestedAttention(Module):
    """Nested attention: one round of attention produces implicit queries,
    keys and values, over which a second (nested) round of attention is run.

    Fix: the `keys_rmsnorm` flag was previously accepted but silently
    ignored — it now actually controls whether the key RMSNorms are applied
    (default True, so default behavior is unchanged).
    """

    def __init__(
        self,
        dim,
        *,
        dim_head = 64,
        heads = 8,
        prenorm = True,
        keys_rmsnorm = True # https://openreview.net/forum?id=HkztQWZfl2
    ):
        super().__init__()

        self.norm = nn.RMSNorm(dim) if prenorm else nn.Identity()

        dim_inner = dim_head * heads
        self.to_queries = nn.Linear(dim, dim_inner, bias = False)

        # keys and values

        self.rotary_embed = RotaryEmbedding(dim_head)

        # three key / value branches form the implicit weights

        self.to_keys = nn.Linear(dim, dim_inner * 3, bias = False)
        self.to_values = nn.Linear(dim, dim_inner * 3, bias = False)

        # key norms honor `keys_rmsnorm` (previously the flag was unused)

        self.key_norms = ModuleList([nn.RMSNorm(dim_head) if keys_rmsnorm else nn.Identity() for _ in range(3)])
        self.nested_key_norm = nn.RMSNorm(dim_head) if keys_rmsnorm else nn.Identity()

        self.split_heads = Rearrange('b n (h d) -> b h n d', h = heads)
        self.merge_heads = Rearrange('b h n d -> b n (h d)')

        self.to_out = nn.Linear(dim_inner, dim, bias = False)

    def forward(
        self,
        tokens,
        cache = None,
        return_kv_cache = False
    ):
        """Attend over `tokens` (b, n, d).

        Returns the output, and when `return_kv_cache` is set, also the cache
        `((keys, values), (nested_keys, nested_values))` for incremental
        decoding via the `cache` argument.
        """

        tokens = self.norm(tokens)

        queries = self.to_queries(tokens)

        keys = self.to_keys(tokens).chunk(3, dim = -1)
        values = self.to_values(tokens).chunk(3, dim = -1)

        # split heads for input as well as all keys, values that form the implicit weights

        queries, keys, values = tree_map(self.split_heads, (queries, keys, values))

        # maybe norm all keys

        keys = [norm(k) for norm, k in zip(self.key_norms, keys)]

        # cache

        if exists(cache):
            (cache_keys, cache_values), (cache_nested_keys, cache_nested_values) = cache

            keys = [cat(args, dim = -2) for args in zip(cache_keys, keys)]
            values = [cat(args, dim = -2) for args in zip(cache_values, values)]

        # attend

        def attend(q, k, v):
            q, k = self.rotary_embed.rotate_queries_with_cached_keys(q, k)

            return F.scaled_dot_product_attention(q, k, v, is_causal = True)

        # nested attention

        nq, nk, nv = [attend(queries, key, value) for key, value in zip(keys, values)]

        nk = self.nested_key_norm(nk)

        if exists(cache):
            nk = cat((cache_nested_keys, nk), dim = -2)
            nv = cat((cache_nested_values, nv), dim = -2)

        out = attend(nq, nk, nv)

        # merge heads

        out = self.merge_heads(out)

        out = self.to_out(out)

        if not return_kv_cache:
            return out

        return out, ((keys, values), (nk, nv))
112
+
113
if __name__ == '__main__':

    # smoke test: full-sequence forward, then one cached decoding step

    nested_attn = NestedAttention(512)

    tokens = torch.randn(1, 1024, 512)

    # must request the kv cache explicitly - without `return_kv_cache = True`
    # forward returns only the output tensor and the 2-tuple unpacking fails

    out1, cache = nested_attn(tokens, return_kv_cache = True)
    out2, cache = nested_attn(tokens[:, -1:], cache = cache, return_kv_cache = True)

    assert out1.shape == tokens.shape
    assert out2.shape == (1, 1, 512)
train_implicit_mlp_attn.py ADDED
@@ -0,0 +1,259 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /// script
2
+ # dependencies = [
3
+ # "accelerate",
4
+ # "titans-pytorch",
5
+ # "tqdm"
6
+ # ]
7
+ # ///
8
+
9
+ import math
10
+ import gzip
11
+ import random
12
+ import tqdm
13
+ import numpy as np
14
+
15
+ import torch
16
+ from torch.optim import Adam
17
+ from torch import nn, Tensor
18
+ from torch.nn import Module, ModuleList
19
+ import torch.nn.functional as F
20
+ from torch.utils.data import DataLoader, Dataset
21
+
22
+ from einops import rearrange
23
+
24
+ from titans_pytorch.implicit_mlp_attention import ImplicitMLPAttention
25
+ from titans_pytorch.nested_attention import NestedAttention
26
+
27
+ from accelerate import Accelerator
28
+
29
+ # constants
30
+
31
NUM_BATCHES = int(1e5)   # total number of training iterations
BATCH_SIZE = 4           # sequences per micro-batch
GRAD_ACCUM_EVERY = 4     # micro-batches accumulated per optimizer step
LEARNING_RATE = 1e-4     # Adam learning rate
VALIDATE_EVERY = 100     # presumably validate every N batches - confirm against the training loop
PRIME_LENGTH = 32        # prompt length used when generating samples
GENERATE_EVERY = 250     # presumably generate a sample every N batches - confirm against the training loop
GENERATE_LENGTH = 512    # tokens to generate per sample
SEQ_LEN = 512            # training sequence length
40
+
41
+ # helpers
42
+
43
def exists(v):
    """True when `v` carries a value (is not None)."""
    return not (v is None)
45
+
46
def cycle(loader):
    """Endlessly re-iterate `loader`, restarting it when exhausted."""
    while True:
        yield from loader
50
+
51
def decode_token(token):
    """Map a token id to a printable character; ids below 32 become a space."""
    return chr(max(32, token))
53
+
54
def decode_tokens(tokens):
    """Decode a sequence of token ids into a printable string."""
    return "".join(decode_token(t) for t in tokens)
56
+
57
+ # sampling helpers
58
+
59
def log(t, eps = 1e-20):
    """Numerically stable log; inputs below `eps` are clamped up first."""
    return t.clamp(min = eps).log()
61
+
62
def gumbel_noise(t):
    """Standard Gumbel noise with the same shape as `t`: -log(-log(U))."""
    u = torch.rand_like(t)
    return log(log(u).neg()).neg()
65
+
66
def gumbel_sample(t, temperature = 1., dim = -1, keepdim = True):
    """Sample indices from logits via the gumbel-max trick.

    Temperature is floored at 1e-10 so near-zero values approximate greedy
    decoding without dividing by zero.
    """
    scaled = t / max(temperature, 1e-10)
    return (scaled + gumbel_noise(t)).argmax(dim = dim, keepdim = keepdim)
68
+
69
def top_k(logits, thres = 0.9):
    """Keep the top `(1 - thres)` fraction of logits; everything else -> -inf."""
    num_keep = math.ceil((1 - thres) * logits.shape[-1])
    values, indices = logits.topk(num_keep)
    filtered = torch.full_like(logits, float('-inf'))
    filtered.scatter_(-1, indices, values)
    return filtered
75
+
76
class Transformer(Module):
    """Decoder-only character-level transformer.

    Each of ``depth`` pre-norm residual blocks pairs an attention module
    (``NestedAttention`` when ``use_nested_attn`` else ``ImplicitMLPAttention``)
    with a GELU feedforward, followed by a final RMSNorm and a bias-free
    projection to ``num_tokens`` logits.
    """

    def __init__(
        self,
        *,
        num_tokens,                                # vocabulary size (byte-level: 256)
        dim,                                       # model width
        depth,                                     # number of attn / ff blocks
        heads = 8,
        implicit_mlp_attn_hiddens = (64, 96, 64),  # hidden sizes for ImplicitMLPAttention
        use_nested_attn = False,                   # choose NestedAttention instead
        dim_head = 64,
        ff_expansion = 4.,
        attn_kwargs: dict = dict(),
    ):
        super().__init__()
        self.token_emb = nn.Embedding(num_tokens, dim)

        self.layers = ModuleList([])

        for _ in range(depth):

            if use_nested_attn:
                attn = NestedAttention(
                    dim = dim,
                    dim_head = dim_head,
                    heads = heads,
                    **attn_kwargs
                )
            else:
                attn = ImplicitMLPAttention(
                    dim = dim,
                    mlp_hiddens = implicit_mlp_attn_hiddens,
                    heads = heads,
                    **attn_kwargs
                )

            dim_inner = int(dim * ff_expansion)

            ff = nn.Sequential(
                nn.RMSNorm(dim),
                nn.Linear(dim, dim_inner),
                nn.GELU(),
                nn.Linear(dim_inner, dim)
            )

            self.layers.append(ModuleList([attn, ff]))

        self.norm = nn.RMSNorm(dim)
        self.to_logits = nn.Linear(dim, num_tokens, bias = False)

    # fix: sampling previously ran with autograd enabled, building a graph
    # across every generated token (hundreds of full forward passes)
    @torch.no_grad()
    def sample(
        self,
        prompt: Tensor,
        seq_len: int,
        temperature = 1.,
        filter_thres = 0.9,
    ):
        """Autoregressively extend ``prompt`` until ``seq_len`` total tokens.

        Returns only the newly generated tokens (prompt stripped). The full
        sequence is re-encoded every step (no KV cache).
        """
        prompt_seq_len, out = prompt.shape[-1], prompt.clone()
        sample_num_times = max(0, seq_len - prompt_seq_len)

        for _ in range(sample_num_times):
            logits = self.forward(out, return_loss = False)
            logits = logits[:, -1]

            logits = top_k(logits, thres = filter_thres)
            sample = gumbel_sample(logits, temperature = temperature, dim = -1)

            out = torch.cat((out, sample), dim = -1)

        return out[..., prompt_seq_len:]

    def forward(self, x, return_loss = False):
        """Return logits ``(b, n, num_tokens)``, or the next-token
        cross-entropy loss when ``return_loss = True`` (input shifted
        internally by one position)."""

        if return_loss:
            x, target = x[:, :-1], x[:, 1:]

        tokens = self.token_emb(x)

        for attn, ff in self.layers:
            tokens = attn(tokens) + tokens
            tokens = ff(tokens) + tokens

        embed = self.norm(tokens)
        logits = self.to_logits(embed)

        if not return_loss:
            return logits

        # cross_entropy expects the class dim second: (b, n, c) -> (b, c, n)
        return F.cross_entropy(logits.transpose(1, 2), target)
168
+
169
# instantiate model — byte-level vocab (256); nested attention variant enabled
model = Transformer(
    num_tokens = 256,
    dim = 512,
    depth = 6,
    implicit_mlp_attn_hiddens = (64, 96, 64),
    use_nested_attn = True # test implicit mlp attn vs nested attn
)

# prepare enwik8 data

# first 90M bytes -> train, remaining 5M -> validation; .copy() makes the
# frombuffer array writable before it is handed to torch.from_numpy
with gzip.open("./data/enwik8.gz") as file:
    data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
    np_train, np_valid = np.split(data, [int(90e6)])
    data_train, data_val = torch.from_numpy(np_train), torch.from_numpy(np_valid)
183
+
184
class TextSamplerDataset(Dataset):
    """Serves random ``(seq_len + 1)``-byte windows from a 1-D byte tensor.

    The extra trailing byte supplies the shifted next-token targets.
    ``index`` is ignored — every access draws a fresh random offset.
    """

    def __init__(self, data, seq_len):
        super().__init__()
        self.data = data
        self.seq_len = seq_len

    def __len__(self):
        return self.data.size(0) // self.seq_len

    def __getitem__(self, index):
        start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
        window = self.data[start : start + self.seq_len + 1]
        return window.long()
197
+
198
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = DataLoader(train_dataset, batch_size = BATCH_SIZE)
val_loader = DataLoader(val_dataset, batch_size = BATCH_SIZE)

# optimizer

optim = Adam(model.parameters(), lr = LEARNING_RATE)

# accelerate — handles device placement (and distributed wrapping when launched
# via `accelerate launch`)

accelerator = Accelerator()

model, optim, train_loader, val_loader = accelerator.prepare(model, optim, train_loader, val_loader)

# cycle — wrap loaders into infinite iterators AFTER accelerator.prepare

train_loader = cycle(train_loader)
val_loader = cycle(val_loader)
217
+
218
# training

for i in tqdm.tqdm(range(NUM_BATCHES), mininterval = 10.0, desc = "training"):
    model.train()

    # accumulate gradients over GRAD_ACCUM_EVERY micro-batches; the loss is
    # divided so the accumulated gradient is the mean over micro-batches
    for _ in range(GRAD_ACCUM_EVERY):
        data = next(train_loader)

        loss = model(data, return_loss = True)

        accelerator.backward(loss / GRAD_ACCUM_EVERY)

    # last micro-batch's (unscaled) loss, once per optimizer step
    accelerator.print(f"training loss: {loss.item():.3f}")

    torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)

    optim.step()
    optim.zero_grad()

    if i % VALIDATE_EVERY == 0:
        model.eval()
        with torch.no_grad():
            valid_data = next(val_loader)

            loss = model(valid_data, return_loss = True)
            accelerator.print(f"validation loss: {loss.item():.3f}")

    if i % GENERATE_EVERY == 0:
        model.eval()

        inp = next(val_loader)[0, :PRIME_LENGTH]

        prime = decode_tokens(inp)
        accelerator.print(f"\n\n[prompt]: {prime}")

        prompt = inp[None, ...]

        # fix: run autoregressive sampling under no_grad — previously every
        # generated token extended a live autograd graph over the whole model
        with torch.no_grad():
            sampled = model.sample(prompt, GENERATE_LENGTH)

        base_decode_output = decode_tokens(sampled[0])

        accelerator.print(f"\n[generated]: {base_decode_output}\n\n")
train_mac.py ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /// script
2
+ # dependencies = [
3
+ # "accelerate",
4
+ # "adam-atan2-pytorch>=0.1.18",
5
+ # "setuptools",
6
+ # "titans-pytorch",
7
+ # "tqdm",
8
+ # "wandb"
9
+ # ]
10
+ # ///
11
+
12
+ import random
13
+ import tqdm
14
+ import gzip
15
+ import numpy as np
16
+
17
+ import torch
18
+ from torch import nn, Tensor
19
+ from torch.nn import functional as F
20
+ from torch.utils.data import DataLoader, Dataset
21
+
22
+ from adam_atan2_pytorch import AdoptAtan2
23
+
24
+ from titans_pytorch import (
25
+ MemoryAsContextTransformer,
26
+ MemoryMLP,
27
+ MemoryAttention
28
+ )
29
+
30
# constants

# optimization / logging schedule
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
PRIME_LENGTH = 100       # chars of validation text used as the sampling prompt
GENERATE_LENGTH = 512
SHOULD_GENERATE = True
SEQ_LEN = 512

# neural memory related

NEURAL_MEMORY_DEPTH = 2
NUM_PERSIST_MEM = 4
NUM_LONGTERM_MEM = 4
NEURAL_MEM_LAYERS = (2, 4, 6) # layers 2, 4, 6 have neural memory, can add more
NEURAL_MEM_GATE_ATTN_OUTPUT = False
NEURAL_MEM_MOMENTUM = True
NEURAL_MEM_MOMENTUM_ORDER = 1
NEURAL_MEM_QK_NORM = True
NEURAL_MEM_MAX_LR = 1e-1
USE_MEM_ATTENTION_MODEL = False  # selects MemoryAttention over MemoryMLP below
WINDOW_SIZE = 32                 # segment length for the transformer's attention
NEURAL_MEM_SEGMENT_LEN = 4 # set smaller for more granularity for learning rate / momentum etc
NEURAL_MEM_BATCH_SIZE = 128 # set smaller to update the neural memory weights more often as it traverses the sequence
SLIDING_WINDOWS = True
STORE_ATTN_POOL_CHUNKS = True # whether to use attention pooling for chunk derived momentum, per-layer lr mod, decay
MEMORY_MODEL_PER_LAYER_LEARNED_LR = True
NEURAL_MEM_WEIGHT_RESIDUAL = True # learning to accept contributions from the weights of the previous neural mem layer brings about significant improvements. this was improvised and not in the paper, but inspired by the value residual learning free lunch paper
NEURAL_MEM_QKV_RECEIVES_DIFF_VIEW = True # will allow the neural memory to select what layers from which to derive queries / keys / values, effectively allowing it to graft itself to the transformer in any way to be beneficial. this is to address an issue from a phd student who noted that the mem network is learning nothing more than wk @ wv. this also generalizes all possible ways to connect the neural memory to a transformer, a sort of NAS
NEURAL_MEM_SPEC_NORM_SURPRISES = True # applying lessons from Muon optimizer to surprise updates, by spectral norming the surprises

# experiment related

PROJECT_NAME = 'titans-mac-transformer'
RUN_NAME = f'mac - {NUM_LONGTERM_MEM} longterm mems, layers {NEURAL_MEM_LAYERS}'
WANDB_ONLINE = False # turn this on to pipe experiment to cloud

# perf related

USE_ACCELERATED_SCAN = True
USE_FLEX_ATTN = True
USE_FAST_INFERENCE = False  # passed to model.sample(use_cache = ...) below
76
+
77
# wandb experiment tracker

import wandb
# 'disabled' keeps the run fully local unless WANDB_ONLINE is flipped on
wandb.init(project = PROJECT_NAME, mode = 'disabled' if not WANDB_ONLINE else 'online')
wandb.run.name = RUN_NAME
wandb.run.save()
83
+
84
# helpers

def cycle(loader):
    """Loop over *loader* forever, restarting each time it is exhausted."""
    while True:
        yield from loader

def decode_token(token):
    """Render one byte value as a printable character (floor at space)."""
    return chr(token) if token > 32 else chr(32)

def decode_tokens(tokens):
    """Render a sequence of byte values as a string."""
    return ''.join(decode_token(t) for t in tokens)
96
+
97
# memory model

# pick the neural-memory parameterization — a small attention network or an
# MLP — per the USE_MEM_ATTENTION_MODEL flag; only the chosen branch is built
neural_memory_model = (
    MemoryAttention(dim = 64)
    if USE_MEM_ATTENTION_MODEL else
    MemoryMLP(dim = 64, depth = NEURAL_MEMORY_DEPTH)
)
108
+
109
# instantiate memory-as-context transformer
# (all flags come from the constants block above; requires a CUDA device)

model = MemoryAsContextTransformer(
    num_tokens = 256,                  # byte-level vocabulary
    dim = 384,
    depth = 8,
    segment_len = WINDOW_SIZE,
    num_persist_mem_tokens = NUM_PERSIST_MEM,
    num_longterm_mem_tokens = NUM_LONGTERM_MEM,
    neural_memory_layers = NEURAL_MEM_LAYERS,
    neural_memory_segment_len = NEURAL_MEM_SEGMENT_LEN,
    neural_memory_batch_size = NEURAL_MEM_BATCH_SIZE,
    neural_mem_gate_attn_output = NEURAL_MEM_GATE_ATTN_OUTPUT,
    neural_mem_weight_residual = NEURAL_MEM_WEIGHT_RESIDUAL,
    neural_memory_qkv_receives_diff_views = NEURAL_MEM_QKV_RECEIVES_DIFF_VIEW,
    use_flex_attn = USE_FLEX_ATTN,
    sliding_window_attn = SLIDING_WINDOWS,
    neural_memory_model = neural_memory_model,   # MemoryAttention or MemoryMLP, built above
    neural_memory_kwargs = dict(
        dim_head = 64,
        heads = 4,
        attn_pool_chunks = STORE_ATTN_POOL_CHUNKS,
        qk_rmsnorm = NEURAL_MEM_QK_NORM,
        momentum = NEURAL_MEM_MOMENTUM,
        momentum_order = NEURAL_MEM_MOMENTUM_ORDER,
        default_step_transform_max_lr = NEURAL_MEM_MAX_LR,
        use_accelerated_scan = USE_ACCELERATED_SCAN,
        per_parameter_lr_modulation = MEMORY_MODEL_PER_LAYER_LEARNED_LR,
        spectral_norm_surprises = NEURAL_MEM_SPEC_NORM_SURPRISES
    )
).cuda()
140
+
141
# prepare enwik8 data

# first 90M bytes -> train, remaining 5M -> validation; .copy() makes the
# frombuffer array writable before it is handed to torch.from_numpy
with gzip.open('./data/enwik8.gz') as file:
    data = np.frombuffer(file.read(int(95e6)), dtype = np.uint8).copy()
    data_train, data_val = np.split(data, [int(90e6)])
    data_train, data_val = map(torch.from_numpy, (data_train, data_val))
147
+
148
class TextSamplerDataset(Dataset):
    """Serves random ``(seq_len + 1)``-byte windows from a 1-D byte tensor,
    moved onto the default CUDA device (the extra byte supplies the shifted
    next-token targets)."""

    def __init__(self, data, seq_len):
        super().__init__()
        self.data = data
        self.seq_len = seq_len

    def __getitem__(self, index):
        # `index` is ignored — every access is an independent random crop
        start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
        window = self.data[start: start + self.seq_len + 1].long()
        return window.cuda()

    def __len__(self):
        return self.data.size(0) // self.seq_len
161
+
162
# loaders are wrapped into infinite iterators immediately (single-process,
# samples already land on the GPU inside the dataset's __getitem__)
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))

# optimizer

optim = AdoptAtan2(model.parameters(), lr = LEARNING_RATE)
170
+
171
# training

for i in tqdm.tqdm(range(NUM_BATCHES), mininterval = 10., desc = 'training'):
    model.train()

    for __ in range(GRADIENT_ACCUMULATE_EVERY):
        loss = model(next(train_loader), return_loss = True)
        # fix: scale by the accumulation factor so the accumulated gradient is
        # the mean over micro-batches (consistent with the sibling train.py,
        # which divides by its GRAD_ACCUM_EVERY before backward)
        (loss / GRADIENT_ACCUMULATE_EVERY).backward()

    # last micro-batch's (unscaled) loss, once per optimizer step
    print(f'training loss: {loss.item():.4f}')
    torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
    optim.step()
    optim.zero_grad()
    wandb.log(dict(loss = loss.item()))

    if i % VALIDATE_EVERY == 0:
        model.eval()
        with torch.no_grad():
            loss = model(next(val_loader), return_loss = True)
            print(f'validation loss: {loss.item():.4f}')

    if SHOULD_GENERATE and i % GENERATE_EVERY == 0:
        model.eval()
        inp = random.choice(val_dataset)[:PRIME_LENGTH]
        prime = decode_tokens(inp)
        # fix: the original `print(f'%s \n\n %s', (prime, '*' * 100))` mixed
        # %-style placeholders into an f-string and passed the values as a
        # second positional argument, so it printed the literal "%s"s and a
        # tuple instead of the prompt
        print(f'{prime} \n\n {"*" * 100}')

        # NOTE(review): sampling is deliberately not wrapped in torch.no_grad
        # here — the neural memory may rely on autograd internally during
        # inference; confirm before adding it
        sample = model.sample(inp[None, ...], GENERATE_LENGTH, use_cache = USE_FAST_INFERENCE)
        output_str = decode_tokens(sample[0])
        print(output_str)