Add files using upload-large-folder tool
- .ipynb_checkpoints/llm_downloader-checkpoint.py +13 -0
- LlamaFactory/.dockerignore +15 -0
- LlamaFactory/.env.local +45 -0
- LlamaFactory/.gitattributes +2 -0
- LlamaFactory/.gitignore +183 -0
- LlamaFactory/.pre-commit-config.yaml +28 -0
- LlamaFactory/CITATION.cff +44 -0
- LlamaFactory/LICENSE +201 -0
- LlamaFactory/MANIFEST.in +1 -0
- LlamaFactory/Makefile +28 -0
- LlamaFactory/README.md +957 -0
- LlamaFactory/README_zh.md +960 -0
- LlamaFactory/pyproject.toml +149 -0
- LlamaFactory/wandb/debug-cli.root.log +0 -0
- LlamaFactory/wandb/debug-internal.log +8 -0
- LlamaFactory/wandb/debug.log +350 -0
- LlamaFactory/wandb/run-20260211_035539-7vgn4sn5/files/config.yaml +723 -0
- LlamaFactory/wandb/run-20260211_035539-7vgn4sn5/files/output.log +0 -0
- LlamaFactory/wandb/run-20260211_035539-7vgn4sn5/files/wandb-metadata.json +41 -0
- LlamaFactory/wandb/run-20260211_035539-7vgn4sn5/files/wandb-summary.json +1 -0
.ipynb_checkpoints/llm_downloader-checkpoint.py
ADDED
@@ -0,0 +1,13 @@
+from huggingface_hub import snapshot_download
+repo_id = "Qwen/QwQ-32B"
+md = "/workspace/Qwen/QwQ-32B"
+
+snapshot_download(
+    repo_id=repo_id,
+    cache_dir=md,
+    local_dir=md,
+    # repo_type="dataset",
+    local_dir_use_symlinks=False,
+    # resume_download=True,
+    ignore_patterns=["original/*"],
+)
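The script above (a Jupyter checkpoint of `llm_downloader.py`) pulls the full `Qwen/QwQ-32B` snapshot into `/workspace/Qwen/QwQ-32B`, skipping files under `original/`. As a minimal follow-up sketch that is not part of this commit, and assuming `transformers` is installed, the snapshot could be verified and loaded from the local directory like this:

```python
# Illustrative sketch only (not in the commit): check that the snapshot landed
# and load the config/tokenizer from disk without contacting the Hub again.
from pathlib import Path

from transformers import AutoConfig, AutoTokenizer

local_dir = Path("/workspace/Qwen/QwQ-32B")

if not (local_dir / "config.json").exists():
    raise FileNotFoundError(f"snapshot at {local_dir} looks incomplete")

config = AutoConfig.from_pretrained(local_dir, local_files_only=True)
tokenizer = AutoTokenizer.from_pretrained(local_dir, local_files_only=True)
print(config.model_type, "loaded with", tokenizer.__class__.__name__)
```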
LlamaFactory/.dockerignore
ADDED
@@ -0,0 +1,15 @@
+.vscode
+.git
+.github
+.venv
+cache
+docker
+saves
+hf_cache
+ms_cache
+om_cache
+shared_data
+output
+.dockerignore
+.gitattributes
+.gitignore
LlamaFactory/.env.local
ADDED
@@ -0,0 +1,45 @@
+# Note: actually we do not support .env, just for reference
+# api
+API_HOST=
+API_PORT=
+API_KEY=
+API_MODEL_NAME=
+API_VERBOSE=
+FASTAPI_ROOT_PATH=
+MAX_CONCURRENT=
+# general
+DISABLE_VERSION_CHECK=
+FORCE_CHECK_IMPORTS=
+ALLOW_EXTRA_ARGS=
+LLAMAFACTORY_VERBOSITY=
+USE_MODELSCOPE_HUB=
+USE_OPENMIND_HUB=
+USE_RAY=
+USE_KT=
+RECORD_VRAM=
+OPTIM_TORCH=
+NPU_JIT_COMPILE=
+# torchrun
+FORCE_TORCHRUN=
+MASTER_ADDR=
+MASTER_PORT=
+NNODES=
+NODE_RANK=
+NPROC_PER_NODE=
+# wandb
+WANDB_DISABLED=
+WANDB_PROJECT=
+WANDB_API_KEY=
+# gradio ui
+GRADIO_SHARE=
+GRADIO_SERVER_NAME=
+GRADIO_SERVER_PORT=
+GRADIO_ROOT_PATH=
+GRADIO_IPV6=
+# backend
+USE_MCA=
+# setup
+ENABLE_SHORT_CONSOLE=
+# reserved (do not use)
+LLAMABOARD_ENABLED=
+LLAMABOARD_WORKDIR=
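The `.env.local` file above only lists variable names for reference (its first line notes that a `.env` file is not actually loaded by the project). A hypothetical sketch of reading a few of these settings from the process environment, with illustrative fallback values that are assumptions rather than project defaults:

```python
# Hypothetical usage sketch: variable names come from .env.local above;
# the fallback values here are placeholders for illustration only.
import os

wandb_project = os.getenv("WANDB_PROJECT", "llamafactory-runs")  # assumed default
gradio_port = int(os.getenv("GRADIO_SERVER_PORT", "7860"))       # assumed default
use_modelscope = os.getenv("USE_MODELSCOPE_HUB", "0").lower() in ("1", "true")

print(f"wandb={wandb_project} port={gradio_port} modelscope={use_modelscope}")
```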
LlamaFactory/.gitattributes
ADDED
@@ -0,0 +1,2 @@
+# Auto detect text files and perform LF normalization
+* text=auto
LlamaFactory/.gitignore
ADDED
@@ -0,0 +1,183 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+.idea/
+
+# vscode
+.vscode/
+
+# uv
+uv.lock
+
+# macOS
+.DS_Store
+
+# custom .gitignore
+hf_cache/
+ms_cache/
+om_cache/
+llamaboard_cache/
+llamaboard_config/
+saves/
+output/
+outputs/
+wandb/
+swanlog/
+generated_predictions.jsonl
+predictions_score.json
LlamaFactory/.pre-commit-config.yaml
ADDED
@@ -0,0 +1,28 @@
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v6.0.0
+    hooks:
+      - id: check-ast
+      - id: check-added-large-files
+        args: ['--maxkb=25000']
+      - id: check-merge-conflict
+      - id: check-yaml
+      - id: debug-statements
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+        args: [--markdown-linebreak-ext=md]
+      - id: no-commit-to-branch
+        args: ['--branch', 'main']
+
+  - repo: https://github.com/asottile/pyupgrade
+    rev: v3.20.0
+    hooks:
+      - id: pyupgrade
+        args: [--py39-plus]
+
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.13.2
+    hooks:
+      - id: ruff
+        args: [--fix]
+      - id: ruff-format
LlamaFactory/CITATION.cff
ADDED
@@ -0,0 +1,44 @@
+cff-version: 1.2.0
+date-released: 2024-03
+message: "If you use this software, please cite it as below."
+authors:
+  - family-names: "Zheng"
+    given-names: "Yaowei"
+  - family-names: "Zhang"
+    given-names: "Richong"
+  - family-names: "Zhang"
+    given-names: "Junhao"
+  - family-names: "Ye"
+    given-names: "Yanhan"
+  - family-names: "Luo"
+    given-names: "Zheyan"
+  - family-names: "Feng"
+    given-names: "Zhangchi"
+  - family-names: "Ma"
+    given-names: "Yongqiang"
+title: "LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models"
+url: "https://arxiv.org/abs/2403.13372"
+preferred-citation:
+  type: conference-paper
+  conference:
+    name: "Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)"
+  authors:
+    - family-names: "Zheng"
+      given-names: "Yaowei"
+    - family-names: "Zhang"
+      given-names: "Richong"
+    - family-names: "Zhang"
+      given-names: "Junhao"
+    - family-names: "Ye"
+      given-names: "Yanhan"
+    - family-names: "Luo"
+      given-names: "Zheyan"
+    - family-names: "Feng"
+      given-names: "Zhangchi"
+    - family-names: "Ma"
+      given-names: "Yongqiang"
+  title: "LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models"
+  url: "https://arxiv.org/abs/2403.13372"
+  year: 2024
+  publisher: "Association for Computational Linguistics"
+  address: "Bangkok, Thailand"
LlamaFactory/LICENSE
ADDED
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
LlamaFactory/MANIFEST.in
ADDED
@@ -0,0 +1 @@
+include LICENSE
LlamaFactory/Makefile
ADDED
@@ -0,0 +1,28 @@
+.PHONY: build commit license quality style test
+
+check_dirs := scripts src tests tests_v1
+
+RUN := $(shell command -v uv >/dev/null 2>&1 && echo "uv run" || echo "")
+BUILD := $(shell command -v uv >/dev/null 2>&1 && echo "uv build" || echo "python -m build")
+TOOL := $(shell command -v uv >/dev/null 2>&1 && echo "uvx" || echo "")
+
+build:
+	$(BUILD)
+
+commit:
+	$(TOOL) pre-commit install
+	$(TOOL) pre-commit run --all-files
+
+license:
+	$(RUN) python3 tests/check_license.py $(check_dirs)
+
+quality:
+	$(TOOL) ruff check $(check_dirs)
+	$(TOOL) ruff format --check $(check_dirs)
+
+style:
+	$(TOOL) ruff check $(check_dirs) --fix
+	$(TOOL) ruff format $(check_dirs)
+
+test:
+	WANDB_DISABLED=true $(RUN) pytest -vv --import-mode=importlib tests/ tests_v1/
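The `RUN`/`BUILD`/`TOOL` variables in the Makefile above fall back to plain Python tooling when `uv` is not installed. A rough Python equivalent of that detection logic, shown only to illustrate the fallback behaviour:

```python
# Sketch of the Makefile's uv-detection fallback (illustrative only).
import shutil

has_uv = shutil.which("uv") is not None
RUN = "uv run" if has_uv else ""
BUILD = "uv build" if has_uv else "python -m build"
TOOL = "uvx" if has_uv else ""
print(f"RUN={RUN!r} BUILD={BUILD!r} TOOL={TOOL!r}")
```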
LlamaFactory/README.md
ADDED
@@ -0,0 +1,957 @@
+
+
+[](https://github.com/hiyouga/LLaMA-Factory/stargazers)
+[](https://github.com/hiyouga/LLaMA-Factory/commits/main)
+[](https://github.com/hiyouga/LLaMA-Factory/graphs/contributors)
+[](https://github.com/hiyouga/LLaMA-Factory/actions/workflows/tests.yml)
+[](https://pypi.org/project/llamafactory/)
+[](https://scholar.google.com/scholar?cites=12620864006390196564)
+[](https://hub.docker.com/r/hiyouga/llamafactory/tags)
+
+[](https://twitter.com/llamafactory_ai)
+[](https://discord.gg/rKfvV9r9FK)
+[](https://github.com/hiyouga/llamafactory-community)
+[](https://blog.llamafactory.net/en/)
+
+[](https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing)
+[](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory)
+[](https://www.lab4ai.cn/course/detail?id=7c13e60f6137474eb40f6fd3983c0f46&utm_source=LLaMA-Factory)
+[](https://www.llamafactory.com.cn/?utm_source=LLaMA-Factory)
+[](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
+[](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
+[](https://novita.ai/templates-library/105981?sharer=88115474-394e-4bda-968e-b88e123d0c47)
+
+### Used by [Amazon](https://aws.amazon.com/cn/blogs/machine-learning/how-apoidea-group-enhances-visual-information-extraction-from-banking-documents-with-multimodal-models-using-llama-factory-on-amazon-sagemaker-hyperpod/), [NVIDIA](https://developer.nvidia.com/rtx/ai-toolkit), [Aliyun](https://help.aliyun.com/zh/pai/use-cases/fine-tune-a-llama-3-model-with-llama-factory), etc.
+
+<div align="center" markdown="1">
+
+### Supporters ❤️
+
+| <div style="text-align: center;"><a href="https://warp.dev/llama-factory"><img alt="Warp sponsorship" width="400" src="assets/sponsors/warp.jpg"></a><br><a href="https://warp.dev/llama-factory" style="font-size:larger;">Warp, the agentic terminal for developers</a><br><a href="https://warp.dev/llama-factory">Available for MacOS, Linux, & Windows</a> | <a href="https://serpapi.com"><img alt="SerpAPI sponsorship" width="250" src="assets/sponsors/serpapi.svg"> </a> |
+| ---- | ---- |
+
+----
+
+### Easily fine-tune 100+ large language models with zero-code [CLI](#quickstart) and [Web UI](#fine-tuning-with-llama-board-gui-powered-by-gradio)
+
+
+
+</div>
+
+👋 Join our [WeChat](https://github.com/hiyouga/llamafactory-community/blob/main/wechat/main.jpg), [NPU](https://github.com/hiyouga/llamafactory-community/blob/main/wechat/npu.jpg), [Lab4AI](https://github.com/hiyouga/llamafactory-community/blob/main/wechat/lab4ai.jpg), [LLaMA Factory Online](https://github.com/hiyouga/llamafactory-community/blob/main/wechat/online.jpg) user group.
+
+\[ English | [中文](README_zh.md) \]
+
+**Fine-tuning a large language model can be easy as...**
+
+https://github.com/user-attachments/assets/3991a3a8-4276-4d30-9cab-4cb0c4b9b99e
+
+Start local training:
+- Please refer to [usage](#getting-started)
+
+Start cloud training:
+- **Colab (free)**: https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing
+- **PAI-DSW (free trial)**: https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory
+- **LLaMA Factory Online**: https://www.llamafactory.com.cn/?utm_source=LLaMA-Factory
+- **Alaya NeW (cloud GPU deal)**: https://docs.alayanew.com/docs/documents/useGuide/LLaMAFactory/mutiple/?utm_source=LLaMA-Factory
+
+Read technical notes:
+- **Documentation (WIP)**: https://llamafactory.readthedocs.io/en/latest/
+- **Documentation (AMD GPU)**: https://rocm.docs.amd.com/projects/ai-developer-hub/en/latest/notebooks/fine_tune/llama_factory_llama3.html
+- **Official Blog**: https://blog.llamafactory.net/en/
+- **Official Course**: https://www.lab4ai.cn/course/detail?id=7c13e60f6137474eb40f6fd3983c0f46&utm_source=LLaMA-Factory
+
+> [!NOTE]
+> Except for the above links, all other websites are unauthorized third-party websites. Please carefully use them.
+
+## Table of Contents
+
+- [Features](#features)
+- [Blogs](#blogs)
+- [Changelog](#changelog)
+- [Supported Models](#supported-models)
+- [Supported Training Approaches](#supported-training-approaches)
+- [Provided Datasets](#provided-datasets)
+- [Requirement](#requirement)
+- [Getting Started](#getting-started)
+  - [Installation](#installation)
+  - [Data Preparation](#data-preparation)
+  - [Quickstart](#quickstart)
+  - [Fine-Tuning with LLaMA Board GUI](#fine-tuning-with-llama-board-gui-powered-by-gradio)
+  - [LLaMA Factory Online](#llama-factory-online)
+  - [Build Docker](#build-docker)
+  - [Deploy with OpenAI-style API and vLLM](#deploy-with-openai-style-api-and-vllm)
+  - [Download from ModelScope Hub](#download-from-modelscope-hub)
+  - [Download from Modelers Hub](#download-from-modelers-hub)
+  - [Use W&B Logger](#use-wb-logger)
+  - [Use SwanLab Logger](#use-swanlab-logger)
+- [Projects using LLaMA Factory](#projects-using-llama-factory)
+- [License](#license)
+- [Citation](#citation)
+- [Acknowledgement](#acknowledgement)
+
+## Features
+
+- **Various models**: LLaMA, LLaVA, Mistral, Mixtral-MoE, Qwen3, Qwen3-VL, DeepSeek, Gemma, GLM, Phi, etc.
+- **Integrated methods**: (Continuous) pre-training, (multimodal) supervised fine-tuning, reward modeling, PPO, DPO, KTO, ORPO, etc.
+- **Scalable resources**: 16-bit full-tuning, freeze-tuning, LoRA and 2/3/4/5/6/8-bit QLoRA via AQLM/AWQ/GPTQ/LLM.int8/HQQ/EETQ.
+- **Advanced algorithms**: [GaLore](https://github.com/jiaweizzhao/GaLore), [BAdam](https://github.com/Ledzy/BAdam), [APOLLO](https://github.com/zhuhanqing/APOLLO), [Adam-mini](https://github.com/zyushun/Adam-mini), [Muon](https://github.com/KellerJordan/Muon), [OFT](https://github.com/huggingface/peft/tree/main/src/peft/tuners/oft), DoRA, LongLoRA, LLaMA Pro, Mixture-of-Depths, LoRA+, LoftQ and PiSSA.
+- **Practical tricks**: [FlashAttention-2](https://github.com/Dao-AILab/flash-attention), [Unsloth](https://github.com/unslothai/unsloth), [Liger Kernel](https://github.com/linkedin/Liger-Kernel), [KTransformers](https://github.com/kvcache-ai/ktransformers/), RoPE scaling, NEFTune and rsLoRA.
+- **Wide tasks**: Multi-turn dialogue, tool using, image understanding, visual grounding, video recognition, audio understanding, etc.
+- **Experiment monitors**: LlamaBoard, TensorBoard, Wandb, MLflow, [SwanLab](https://github.com/SwanHubX/SwanLab), etc.
+- **Faster inference**: OpenAI-style API, Gradio UI and CLI with [vLLM worker](https://github.com/vllm-project/vllm) or [SGLang worker](https://github.com/sgl-project/sglang).
+
+### Day-N Support for Fine-Tuning Cutting-Edge Models
+
+| Support Date | Model Name |
+| ------------ | -------------------------------------------------------------------- |
+| Day 0 | Qwen3 / Qwen2.5-VL / Gemma 3 / GLM-4.1V / InternLM 3 / MiniCPM-o-2.6 |
+| Day 1 | Llama 3 / GLM-4 / Mistral Small / PaliGemma2 / Llama 4 |
+
+## Blogs
+
+> [!TIP]
+> Now we have a dedicated blog for LLaMA Factory!
+>
+> Website: https://blog.llamafactory.net/en/
+
+- 💡 [KTransformers Fine-Tuning × LLaMA Factory: Fine-tuning 1000 Billion models with 2 4090-GPU + CPU](https://blog.llamafactory.net/en/posts/ktransformers/) (English)
+- 💡 [Easy Dataset × LLaMA Factory: Enabling LLMs to Efficiently Learn Domain Knowledge](https://buaa-act.feishu.cn/wiki/GVzlwYcRFiR8OLkHbL6cQpYin7g) (English)
+- [Fine-tune a mental health LLM using LLaMA-Factory](https://www.lab4ai.cn/project/detail?id=25cce32ec131497b9e06a93336a0817f&type=project&utm_source=LLaMA-Factory) (Chinese)
+- [Fine-tune GPT-OSS for Role-Playing using LLaMA-Factory](https://docs.llamafactory.com.cn/docs/documents/best-practice/gptroleplay/?utm_source=LLaMA-Factory) (Chinese)
+- [A One-Stop Code-Free Model Reinforcement Learning and Deployment Platform based on LLaMA-Factory and EasyR1](https://aws.amazon.com/cn/blogs/china/building-llm-model-hub-based-on-llamafactory-and-easyr1/) (Chinese)
+- [How Apoidea Group enhances visual information extraction from banking documents with multimodal models using LLaMA-Factory on Amazon SageMaker HyperPod](https://aws.amazon.com/cn/blogs/machine-learning/how-apoidea-group-enhances-visual-information-extraction-from-banking-documents-with-multimodal-models-using-llama-factory-on-amazon-sagemaker-hyperpod/) (English)
+
+<details><summary>All Blogs</summary>
+
+- [Fine-tune Llama3.1-70B for Medical Diagnosis using LLaMA-Factory](https://docs.alayanew.com/docs/documents/bestPractice/bigModel/llama70B/?utm_source=LLaMA-Factory) (Chinese)
+- [Fine-tune Qwen2.5-VL for Autonomous Driving using LLaMA-Factory](https://docs.alayanew.com/docs/documents/useGuide/LLaMAFactory/mutiple/?utm_source=LLaMA-Factory) (Chinese)
+- [LLaMA Factory: Fine-tuning the DeepSeek-R1-Distill-Qwen-7B Model for News Classifier](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_deepseek_r1_distill_7b) (Chinese)
+- [A One-Stop Code-Free Model Fine-Tuning \& Deployment Platform based on SageMaker and LLaMA-Factory](https://aws.amazon.com/cn/blogs/china/a-one-stop-code-free-model-fine-tuning-deployment-platform-based-on-sagemaker-and-llama-factory/) (Chinese)
+- [LLaMA Factory Multi-Modal Fine-Tuning Practice: Fine-Tuning Qwen2-VL for Personal Tourist Guide](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_qwen2vl) (Chinese)
+- [LLaMA Factory: Fine-tuning Llama3 for Role-Playing](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory) (Chinese)
+
+</details>
+
+## Changelog
+
+[25/10/26] We support Megatron-core training backend with [**mcore_adapter**](https://github.com/alibaba/ROLL/tree/main/mcore_adapter). See [PR #9237](https://github.com/hiyouga/LLaMA-Factory/pull/9237) to get started.
+
+[25/08/22] We supported **[OFT](https://arxiv.org/abs/2306.07280)** and **[OFTv2](https://arxiv.org/abs/2506.19847)**. See [examples](examples/README.md) for usage.
+
+[25/08/20] We supported fine-tuning the **[Intern-S1-mini](https://huggingface.co/internlm/Intern-S1-mini)** models. See [PR #8976](https://github.com/hiyouga/LLaMA-Factory/pull/8976) to get started.
+
+[25/08/06] We supported fine-tuning the **[GPT-OSS](https://github.com/openai/gpt-oss)** models. See [PR #8826](https://github.com/hiyouga/LLaMA-Factory/pull/8826) to get started.
+
+<details><summary>Full Changelog</summary>
+
+[25/07/02] We supported fine-tuning the **[GLM-4.1V-9B-Thinking](https://github.com/THUDM/GLM-4.1V-Thinking)** model.
+
+[25/04/28] We supported fine-tuning the **[Qwen3](https://qwenlm.github.io/blog/qwen3/)** model family.
+
+[25/04/21] We supported the **[Muon](https://github.com/KellerJordan/Muon)** optimizer. See [examples](examples/README.md) for usage. Thank [@tianshijing](https://github.com/tianshijing)'s PR.
+
+[25/04/16] We supported fine-tuning the **[InternVL3](https://huggingface.co/OpenGVLab/InternVL3-8B)** model. See [PR #7258](https://github.com/hiyouga/LLaMA-Factory/pull/7258) to get started.
+
+[25/04/14] We supported fine-tuning the **[GLM-Z1](https://huggingface.co/THUDM/GLM-Z1-9B-0414)** and **[Kimi-VL](https://huggingface.co/moonshotai/Kimi-VL-A3B-Instruct)** models.
+
+[25/04/06] We supported fine-tuning the **[Llama 4](https://ai.meta.com/blog/llama-4-multimodal-intelligence/)** model. See [PR #7611](https://github.com/hiyouga/LLaMA-Factory/pull/7611) to get started.
+
+[25/03/31] We supported fine-tuning the **[Qwen2.5 Omni](https://qwenlm.github.io/blog/qwen2.5-omni/)** model. See [PR #7537](https://github.com/hiyouga/LLaMA-Factory/pull/7537) to get started.
+
+[25/03/15] We supported **[SGLang](https://github.com/sgl-project/sglang)** as inference backend. Try `infer_backend: sglang` to accelerate inference.
+
+[25/03/12] We supported fine-tuning the **[Gemma 3](https://huggingface.co/blog/gemma3)** model.
+
+[25/02/24] Announcing **[EasyR1](https://github.com/hiyouga/EasyR1)**, an efficient, scalable and multi-modality RL training framework for efficient GRPO training.
+
+[25/02/11] We supported saving the **[Ollama](https://github.com/ollama/ollama)** modelfile when exporting the model checkpoints. See [examples](examples/README.md) for usage.
+
+[25/02/05] We supported fine-tuning the **[Qwen2-Audio](Qwen/Qwen2-Audio-7B-Instruct)** and **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** on audio understanding tasks.
+
+[25/01/31] We supported fine-tuning the **[DeepSeek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1)** and **[Qwen2.5-VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct)** models.
+
+[25/01/15] We supported **[APOLLO](https://arxiv.org/abs/2412.05270)** optimizer. See [examples](examples/README.md) for usage.
+
+[25/01/14] We supported fine-tuning the **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** and **[MiniCPM-V-2.6](https://huggingface.co/openbmb/MiniCPM-V-2_6)** models. Thank [@BUAADreamer](https://github.com/BUAADreamer)'s PR.
+
+[25/01/14] We supported fine-tuning the **[InternLM 3](https://huggingface.co/collections/internlm/)** models. Thank [@hhaAndroid](https://github.com/hhaAndroid)'s PR.
+
+[25/01/10] We supported fine-tuning the **[Phi-4](https://huggingface.co/microsoft/phi-4)** model.
+
+[24/12/21] We supported using **[SwanLab](https://github.com/SwanHubX/SwanLab)** for experiment tracking and visualization. See [this section](#use-swanlab-logger) for details.
+
+[24/11/27] We supported fine-tuning the **[Skywork-o1](https://huggingface.co/Skywork/Skywork-o1-Open-Llama-3.1-8B)** model and the **[OpenO1](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT)** dataset.
+
+[24/10/09] We supported downloading pre-trained models and datasets from the **[Modelers Hub](https://modelers.cn/models)**. See [this tutorial](#download-from-modelers-hub) for usage.
+
+[24/09/19] We supported fine-tuning the **[Qwen2.5](https://qwenlm.github.io/blog/qwen2.5/)** models.
+
+[24/08/30] We supported fine-tuning the **[Qwen2-VL](https://qwenlm.github.io/blog/qwen2-vl/)** models. Thank [@simonJJJ](https://github.com/simonJJJ)'s PR.
+
+[24/08/27] We supported **[Liger Kernel](https://github.com/linkedin/Liger-Kernel)**. Try `enable_liger_kernel: true` for efficient training.
+
+[24/08/09] We supported **[Adam-mini](https://github.com/zyushun/Adam-mini)** optimizer. See [examples](examples/README.md) for usage. Thank [@relic-yuexi](https://github.com/relic-yuexi)'s PR.
+
+[24/07/04] We supported [contamination-free packed training](https://github.com/MeetKai/functionary/tree/main/functionary/train/packing). Use `neat_packing: true` to activate it. Thank [@chuan298](https://github.com/chuan298)'s PR.
+
+[24/06/16] We supported **[PiSSA](https://arxiv.org/abs/2404.02948)** algorithm. See [examples](examples/README.md) for usage.
+
+[24/06/07] We supported fine-tuning the **[Qwen2](https://qwenlm.github.io/blog/qwen2/)** and **[GLM-4](https://github.com/THUDM/GLM-4)** models.
+
+[24/05/26] We supported **[SimPO](https://arxiv.org/abs/2405.14734)** algorithm for preference learning. See [examples](examples/README.md) for usage.
+
+[24/05/20] We supported fine-tuning the **PaliGemma** series models. Note that the PaliGemma models are pre-trained models, you need to fine-tune them with `paligemma` template for chat completion.
+
+[24/05/18] We supported **[KTO](https://arxiv.org/abs/2402.01306)** algorithm for preference learning. See [examples](examples/README.md) for usage.
+
+[24/05/14] We supported training and inference on the Ascend NPU devices. Check [installation](#installation) section for details.
+
+[24/04/26] We supported fine-tuning the **LLaVA-1.5** multimodal LLMs. See [examples](examples/README.md) for usage.
+
+[24/04/22] We provided a **[Colab notebook](https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing)** for fine-tuning the Llama-3 model on a free T4 GPU. Two Llama-3-derived models fine-tuned using LLaMA Factory are available at Hugging Face, check [Llama3-8B-Chinese-Chat](https://huggingface.co/shenzhi-wang/Llama3-8B-Chinese-Chat) and [Llama3-Chinese](https://huggingface.co/zhichen/Llama3-Chinese) for details.
+
+[24/04/21] We supported **[Mixture-of-Depths](https://arxiv.org/abs/2404.02258)** according to [AstraMindAI's implementation](https://github.com/astramind-ai/Mixture-of-depths). See [examples](examples/README.md) for usage.
+
+[24/04/16] We supported **[BAdam](https://arxiv.org/abs/2404.02827)** optimizer. See [examples](examples/README.md) for usage.
+
+[24/04/16] We supported **[unsloth](https://github.com/unslothai/unsloth)**'s long-sequence training (Llama-2-7B-56k within 24GB). It achieves **117%** speed and **50%** memory compared with FlashAttention-2, more benchmarks can be found in [this page](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison).
+
+[24/03/31] We supported **[ORPO](https://arxiv.org/abs/2403.07691)**. See [examples](examples/README.md) for usage.
+
+[24/03/21] Our paper "[LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models](https://arxiv.org/abs/2403.13372)" is available at arXiv!
+
+[24/03/20] We supported **FSDP+QLoRA** that fine-tunes a 70B model on 2x24GB GPUs. See [examples](examples/README.md) for usage.
+
+[24/03/13] We supported **[LoRA+](https://arxiv.org/abs/2402.12354)**. See [examples](examples/README.md) for usage.
+
+[24/03/07] We supported **[GaLore](https://arxiv.org/abs/2403.03507)** optimizer. See [examples](examples/README.md) for usage.
+
+[24/03/07] We integrated **[vLLM](https://github.com/vllm-project/vllm)** for faster and concurrent inference. Try `infer_backend: vllm` to enjoy **270%** inference speed.
+
+[24/02/28] We supported weight-decomposed LoRA (**[DoRA](https://arxiv.org/abs/2402.09353)**). Try `use_dora: true` to activate DoRA training.
+
+[24/02/15] We supported **block expansion** proposed by [LLaMA Pro](https://github.com/TencentARC/LLaMA-Pro). See [examples](examples/README.md) for usage.
+
+[24/02/05] Qwen1.5 (Qwen2 beta version) series models are supported in LLaMA-Factory. Check this [blog post](https://qwenlm.github.io/blog/qwen1.5/) for details.
+
+[24/01/18] We supported **agent tuning** for most models, equipping model with tool using abilities by fine-tuning with `dataset: glaive_toolcall_en`.
+
+[23/12/23] We supported **[unsloth](https://github.com/unslothai/unsloth)**'s implementation to boost LoRA tuning for the LLaMA, Mistral and Yi models. Try `use_unsloth: true` argument to activate unsloth patch. It achieves **170%** speed in our benchmark, check [this page](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison) for details.
+
+[23/12/12] We supported fine-tuning the latest MoE model **[Mixtral 8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)** in our framework. See hardware requirement [here](#hardware-requirement).
+
+[23/12/01] We supported downloading pre-trained models and datasets from the **[ModelScope Hub](https://modelscope.cn/models)**. See [this tutorial](#download-from-modelscope-hub) for usage.
+
+[23/10/21] We supported **[NEFTune](https://arxiv.org/abs/2310.05914)** trick for fine-tuning. Try `neftune_noise_alpha: 5` argument to activate NEFTune.
+
+[23/09/27] We supported **$S^2$-Attn** proposed by [LongLoRA](https://github.com/dvlab-research/LongLoRA) for the LLaMA models. Try `shift_attn: true` argument to enable shift short attention.
+
+[23/09/23] We integrated MMLU, C-Eval and CMMLU benchmarks in this repo. See [examples](examples/README.md) for usage.
+
+[23/09/10] We supported **[FlashAttention-2](https://github.com/Dao-AILab/flash-attention)**. Try `flash_attn: fa2` argument to enable FlashAttention-2 if you are using RTX4090, A100 or H100 GPUs.
+
+[23/08/12] We supported **RoPE scaling** to extend the context length of the LLaMA models. Try `rope_scaling: linear` argument in training and `rope_scaling: dynamic` argument at inference to extrapolate the position embeddings.
+
+[23/08/11] We supported **[DPO training](https://arxiv.org/abs/2305.18290)** for instruction-tuned models. See [examples](examples/README.md) for usage.
+
+[23/07/31] We supported **dataset streaming**. Try `streaming: true` and `max_steps: 10000` arguments to load your dataset in streaming mode.
+
+[23/07/29] We released two instruction-tuned 13B models at Hugging Face. See these Hugging Face Repos ([LLaMA-2](https://huggingface.co/hiyouga/Llama-2-Chinese-13b-chat) / [Baichuan](https://huggingface.co/hiyouga/Baichuan-13B-sft)) for details.
+
+[23/07/18] We developed an **all-in-one Web UI** for training, evaluation and inference. Try `train_web.py` to fine-tune models in your Web browser. Thank [@KanadeSiina](https://github.com/KanadeSiina) and [@codemayq](https://github.com/codemayq) for their efforts in the development.
+
+[23/07/09] We released **[FastEdit](https://github.com/hiyouga/FastEdit)** ⚡🩹, an easy-to-use package for editing the factual knowledge of large language models efficiently. Please follow [FastEdit](https://github.com/hiyouga/FastEdit) if you are interested.
+
+[23/06/29] We provided a **reproducible example** of training a chat model using instruction-following datasets, see [Baichuan-7B-sft](https://huggingface.co/hiyouga/Baichuan-7B-sft) for details.
+
+[23/06/22] We aligned the [demo API](src/api_demo.py) with the [OpenAI's](https://platform.openai.com/docs/api-reference/chat) format where you can insert the fine-tuned model in **arbitrary ChatGPT-based applications**.
+
+[23/06/03] We supported quantized training and inference (aka **[QLoRA](https://github.com/artidoro/qlora)**). See [examples](examples/README.md) for usage.
+
+</details>
+
+> [!TIP]
+> If you cannot use the latest feature, please pull the latest code and install LLaMA-Factory again.
+
+## Supported Models
+
+| Model | Model size | Template |
+| ----------------------------------------------------------------- | -------------------------------- | -------------------- |
+| [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
+| [DeepSeek (LLM/Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
+| [DeepSeek 3-3.2](https://huggingface.co/deepseek-ai) | 236B/671B | deepseek3 |
+| [DeepSeek R1 (Distill)](https://huggingface.co/deepseek-ai) | 1.5B/7B/8B/14B/32B/70B/671B | deepseekr1 |
+| [ERNIE-4.5](https://huggingface.co/baidu) | 0.3B/21B/300B | ernie_nothink |
+| [Falcon/Falcon H1](https://huggingface.co/tiiuae) | 0.5B/1.5B/3B/7B/11B/34B/40B/180B | falcon/falcon_h1 |
+| [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma/gemma2 |
+| [Gemma 3/Gemma 3n](https://huggingface.co/google) | 270M/1B/4B/6B/8B/12B/27B | gemma3/gemma3n |
+| [GLM-4/GLM-4-0414/GLM-Z1](https://huggingface.co/zai-org) | 9B/32B | glm4/glmz1 |
+| [GLM-4.5/GLM-4.5(6)V](https://huggingface.co/zai-org) | 9B/106B/355B | glm4_moe/glm4_5v |
+| [GPT-2](https://huggingface.co/openai-community) | 0.1B/0.4B/0.8B/1.5B | - |
+| [GPT-OSS](https://huggingface.co/openai) | 20B/120B | gpt_oss |
+| [Granite 3-4](https://huggingface.co/ibm-granite) | 1B/2B/3B/7B/8B | granite3/granite4 |
+| [Hunyuan/Hunyuan1.5 (MT)](https://huggingface.co/tencent/) | 0.5B/1.8B/4B/7B/13B | hunyuan/hunyuan_small |
+| [InternLM 2-3](https://huggingface.co/internlm) | 7B/8B/20B | intern2 |
+| [InternVL 2.5-3.5](https://huggingface.co/OpenGVLab) | 1B/2B/4B/8B/14B/30B/38B/78B/241B | intern_vl |
+
|
| 127 |
+
- [Fine-tune Llama3.1-70B for Medical Diagnosis using LLaMA-Factory](https://docs.alayanew.com/docs/documents/bestPractice/bigModel/llama70B/?utm_source=LLaMA-Factory) (Chinese)
|
| 128 |
+
- [Fine-tune Qwen2.5-VL for Autonomous Driving using LLaMA-Factory](https://docs.alayanew.com/docs/documents/useGuide/LLaMAFactory/mutiple/?utm_source=LLaMA-Factory) (Chinese)
|
| 129 |
+
- [LLaMA Factory: Fine-tuning the DeepSeek-R1-Distill-Qwen-7B Model for News Classifier](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_deepseek_r1_distill_7b) (Chinese)
|
| 130 |
+
- [A One-Stop Code-Free Model Fine-Tuning \& Deployment Platform based on SageMaker and LLaMA-Factory](https://aws.amazon.com/cn/blogs/china/a-one-stop-code-free-model-fine-tuning-deployment-platform-based-on-sagemaker-and-llama-factory/) (Chinese)
|
| 131 |
+
- [LLaMA Factory Multi-Modal Fine-Tuning Practice: Fine-Tuning Qwen2-VL for Personal Tourist Guide](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_qwen2vl) (Chinese)
|
| 132 |
+
- [LLaMA Factory: Fine-tuning Llama3 for Role-Playing](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory) (Chinese)
|
| 133 |
+
|
| 134 |
+
</details>
|
| 135 |
+
|
| 136 |
+
## Changelog
|
| 137 |
+
|
| 138 |
+
[25/10/26] We support Megatron-core training backend with [**mcore_adapter**](https://github.com/alibaba/ROLL/tree/main/mcore_adapter). See [PR #9237](https://github.com/hiyouga/LLaMA-Factory/pull/9237) to get started.
|
| 139 |
+
|
| 140 |
+
[25/08/22] We supported **[OFT](https://arxiv.org/abs/2306.07280)** and **[OFTv2](https://arxiv.org/abs/2506.19847)**. See [examples](examples/README.md) for usage.
|
| 141 |
+
|
| 142 |
+
[25/08/20] We supported fine-tuning the **[Intern-S1-mini](https://huggingface.co/internlm/Intern-S1-mini)** models. See [PR #8976](https://github.com/hiyouga/LLaMA-Factory/pull/8976) to get started.
|
| 143 |
+
|
| 144 |
+
[25/08/06] We supported fine-tuning the **[GPT-OSS](https://github.com/openai/gpt-oss)** models. See [PR #8826](https://github.com/hiyouga/LLaMA-Factory/pull/8826) to get started.
|
| 145 |
+
|
| 146 |
+
<details><summary>Full Changelog</summary>
|
| 147 |
+
|
| 148 |
+
[25/07/02] We supported fine-tuning the **[GLM-4.1V-9B-Thinking](https://github.com/THUDM/GLM-4.1V-Thinking)** model.
|
| 149 |
+
|
| 150 |
+
[25/04/28] We supported fine-tuning the **[Qwen3](https://qwenlm.github.io/blog/qwen3/)** model family.
|
| 151 |
+
|
| 152 |
+
[25/04/21] We supported the **[Muon](https://github.com/KellerJordan/Muon)** optimizer. See [examples](examples/README.md) for usage. Thank [@tianshijing](https://github.com/tianshijing)'s PR.
|
| 153 |
+
|
| 154 |
+
[25/04/16] We supported fine-tuning the **[InternVL3](https://huggingface.co/OpenGVLab/InternVL3-8B)** model. See [PR #7258](https://github.com/hiyouga/LLaMA-Factory/pull/7258) to get started.
|
| 155 |
+
|
| 156 |
+
[25/04/14] We supported fine-tuning the **[GLM-Z1](https://huggingface.co/THUDM/GLM-Z1-9B-0414)** and **[Kimi-VL](https://huggingface.co/moonshotai/Kimi-VL-A3B-Instruct)** models.
|
| 157 |
+
|
| 158 |
+
[25/04/06] We supported fine-tuning the **[Llama 4](https://ai.meta.com/blog/llama-4-multimodal-intelligence/)** model. See [PR #7611](https://github.com/hiyouga/LLaMA-Factory/pull/7611) to get started.
|
| 159 |
+
|
| 160 |
+
[25/03/31] We supported fine-tuning the **[Qwen2.5 Omni](https://qwenlm.github.io/blog/qwen2.5-omni/)** model. See [PR #7537](https://github.com/hiyouga/LLaMA-Factory/pull/7537) to get started.
|
| 161 |
+
|
| 162 |
+
[25/03/15] We supported **[SGLang](https://github.com/sgl-project/sglang)** as inference backend. Try `infer_backend: sglang` to accelerate inference.
|
| 163 |
+
|
| 164 |
+
[25/03/12] We supported fine-tuning the **[Gemma 3](https://huggingface.co/blog/gemma3)** model.
|
| 165 |
+
|
| 166 |
+
[25/02/24] Announcing **[EasyR1](https://github.com/hiyouga/EasyR1)**, an efficient, scalable and multi-modality RL training framework for efficient GRPO training.
|
| 167 |
+
|
| 168 |
+
[25/02/11] We supported saving the **[Ollama](https://github.com/ollama/ollama)** modelfile when exporting the model checkpoints. See [examples](examples/README.md) for usage.
|
| 169 |
+
|
| 170 |
+
[25/02/05] We supported fine-tuning the **[Qwen2-Audio](Qwen/Qwen2-Audio-7B-Instruct)** and **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** on audio understanding tasks.
|
| 171 |
+
|
| 172 |
+
[25/01/31] We supported fine-tuning the **[DeepSeek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1)** and **[Qwen2.5-VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct)** models.
|
| 173 |
+
|
| 174 |
+
[25/01/15] We supported **[APOLLO](https://arxiv.org/abs/2412.05270)** optimizer. See [examples](examples/README.md) for usage.
|
| 175 |
+
|
| 176 |
+
[25/01/14] We supported fine-tuning the **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** and **[MiniCPM-V-2.6](https://huggingface.co/openbmb/MiniCPM-V-2_6)** models. Thank [@BUAADreamer](https://github.com/BUAADreamer)'s PR.
|
| 177 |
+
|
| 178 |
+
[25/01/14] We supported fine-tuning the **[InternLM 3](https://huggingface.co/collections/internlm/)** models. Thank [@hhaAndroid](https://github.com/hhaAndroid)'s PR.
|
| 179 |
+
|
| 180 |
+
[25/01/10] We supported fine-tuning the **[Phi-4](https://huggingface.co/microsoft/phi-4)** model.
|
| 181 |
+
|
| 182 |
+
[24/12/21] We supported using **[SwanLab](https://github.com/SwanHubX/SwanLab)** for experiment tracking and visualization. See [this section](#use-swanlab-logger) for details.
|
| 183 |
+
|
| 184 |
+
[24/11/27] We supported fine-tuning the **[Skywork-o1](https://huggingface.co/Skywork/Skywork-o1-Open-Llama-3.1-8B)** model and the **[OpenO1](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT)** dataset.
|
| 185 |
+
|
| 186 |
+
[24/10/09] We supported downloading pre-trained models and datasets from the **[Modelers Hub](https://modelers.cn/models)**. See [this tutorial](#download-from-modelers-hub) for usage.
|
| 187 |
+
|
| 188 |
+
[24/09/19] We supported fine-tuning the **[Qwen2.5](https://qwenlm.github.io/blog/qwen2.5/)** models.
|
| 189 |
+
|
| 190 |
+
[24/08/30] We supported fine-tuning the **[Qwen2-VL](https://qwenlm.github.io/blog/qwen2-vl/)** models. Thank [@simonJJJ](https://github.com/simonJJJ)'s PR.
|
| 191 |
+
|
| 192 |
+
[24/08/27] We supported **[Liger Kernel](https://github.com/linkedin/Liger-Kernel)**. Try `enable_liger_kernel: true` for efficient training.
|
| 193 |
+
|
| 194 |
+
[24/08/09] We supported **[Adam-mini](https://github.com/zyushun/Adam-mini)** optimizer. See [examples](examples/README.md) for usage. Thank [@relic-yuexi](https://github.com/relic-yuexi)'s PR.
|
| 195 |
+
|
| 196 |
+
[24/07/04] We supported [contamination-free packed training](https://github.com/MeetKai/functionary/tree/main/functionary/train/packing). Use `neat_packing: true` to activate it. Thank [@chuan298](https://github.com/chuan298)'s PR.
|
| 197 |
+
|
| 198 |
+
[24/06/16] We supported **[PiSSA](https://arxiv.org/abs/2404.02948)** algorithm. See [examples](examples/README.md) for usage.
|
| 199 |
+
|
| 200 |
+
[24/06/07] We supported fine-tuning the **[Qwen2](https://qwenlm.github.io/blog/qwen2/)** and **[GLM-4](https://github.com/THUDM/GLM-4)** models.
|
| 201 |
+
|
| 202 |
+
[24/05/26] We supported **[SimPO](https://arxiv.org/abs/2405.14734)** algorithm for preference learning. See [examples](examples/README.md) for usage.
|
| 203 |
+
|
| 204 |
+
[24/05/20] We supported fine-tuning the **PaliGemma** series models. Note that the PaliGemma models are pre-trained models, you need to fine-tune them with `paligemma` template for chat completion.
|
| 205 |
+
|
| 206 |
+
[24/05/18] We supported **[KTO](https://arxiv.org/abs/2402.01306)** algorithm for preference learning. See [examples](examples/README.md) for usage.
|
| 207 |
+
|
| 208 |
+
[24/05/14] We supported training and inference on the Ascend NPU devices. Check [installation](#installation) section for details.
|
| 209 |
+
|
| 210 |
+
[24/04/26] We supported fine-tuning the **LLaVA-1.5** multimodal LLMs. See [examples](examples/README.md) for usage.
|
| 211 |
+
|
| 212 |
+
[24/04/22] We provided a **[Colab notebook](https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing)** for fine-tuning the Llama-3 model on a free T4 GPU. Two Llama-3-derived models fine-tuned using LLaMA Factory are available at Hugging Face, check [Llama3-8B-Chinese-Chat](https://huggingface.co/shenzhi-wang/Llama3-8B-Chinese-Chat) and [Llama3-Chinese](https://huggingface.co/zhichen/Llama3-Chinese) for details.
|
| 213 |
+
|
| 214 |
+
[24/04/21] We supported **[Mixture-of-Depths](https://arxiv.org/abs/2404.02258)** according to [AstraMindAI's implementation](https://github.com/astramind-ai/Mixture-of-depths). See [examples](examples/README.md) for usage.
|
| 215 |
+
|
| 216 |
+
[24/04/16] We supported **[BAdam](https://arxiv.org/abs/2404.02827)** optimizer. See [examples](examples/README.md) for usage.
|
| 217 |
+
|
| 218 |
+
[24/04/16] We supported **[unsloth](https://github.com/unslothai/unsloth)**'s long-sequence training (Llama-2-7B-56k within 24GB). It achieves **117%** speed and **50%** memory compared with FlashAttention-2, more benchmarks can be found in [this page](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison).
|
| 219 |
+
|
| 220 |
+
[24/03/31] We supported **[ORPO](https://arxiv.org/abs/2403.07691)**. See [examples](examples/README.md) for usage.
|
| 221 |
+
|
| 222 |
+
[24/03/21] Our paper "[LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models](https://arxiv.org/abs/2403.13372)" is available at arXiv!
|
| 223 |
+
|
| 224 |
+
[24/03/20] We supported **FSDP+QLoRA** that fine-tunes a 70B model on 2x24GB GPUs. See [examples](examples/README.md) for usage.
|
| 225 |
+
|
| 226 |
+
[24/03/13] We supported **[LoRA+](https://arxiv.org/abs/2402.12354)**. See [examples](examples/README.md) for usage.
|
| 227 |
+
|
| 228 |
+
[24/03/07] We supported **[GaLore](https://arxiv.org/abs/2403.03507)** optimizer. See [examples](examples/README.md) for usage.
|
| 229 |
+
|
| 230 |
+
[24/03/07] We integrated **[vLLM](https://github.com/vllm-project/vllm)** for faster and concurrent inference. Try `infer_backend: vllm` to enjoy **270%** inference speed.
|
| 231 |
+
|
| 232 |
+
[24/02/28] We supported weight-decomposed LoRA (**[DoRA](https://arxiv.org/abs/2402.09353)**). Try `use_dora: true` to activate DoRA training.
|
| 233 |
+
|
| 234 |
+
[24/02/15] We supported **block expansion** proposed by [LLaMA Pro](https://github.com/TencentARC/LLaMA-Pro). See [examples](examples/README.md) for usage.
|
| 235 |
+
|
| 236 |
+
[24/02/05] Qwen1.5 (Qwen2 beta version) series models are supported in LLaMA-Factory. Check this [blog post](https://qwenlm.github.io/blog/qwen1.5/) for details.
|
| 237 |
+
|
| 238 |
+
[24/01/18] We supported **agent tuning** for most models, equipping model with tool using abilities by fine-tuning with `dataset: glaive_toolcall_en`.
|
| 239 |
+
|
| 240 |
+
[23/12/23] We supported **[unsloth](https://github.com/unslothai/unsloth)**'s implementation to boost LoRA tuning for the LLaMA, Mistral and Yi models. Try `use_unsloth: true` argument to activate unsloth patch. It achieves **170%** speed in our benchmark, check [this page](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison) for details.
|
| 241 |
+
|
| 242 |
+
[23/12/12] We supported fine-tuning the latest MoE model **[Mixtral 8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)** in our framework. See hardware requirement [here](#hardware-requirement).
|
| 243 |
+
|
| 244 |
+
[23/12/01] We supported downloading pre-trained models and datasets from the **[ModelScope Hub](https://modelscope.cn/models)**. See [this tutorial](#download-from-modelscope-hub) for usage.
|
| 245 |
+
|
| 246 |
+
[23/10/21] We supported **[NEFTune](https://arxiv.org/abs/2310.05914)** trick for fine-tuning. Try `neftune_noise_alpha: 5` argument to activate NEFTune.
|
| 247 |
+
|
| 248 |
+
[23/09/27] We supported **$S^2$-Attn** proposed by [LongLoRA](https://github.com/dvlab-research/LongLoRA) for the LLaMA models. Try `shift_attn: true` argument to enable shift short attention.
|
| 249 |
+
|
| 250 |
+
[23/09/23] We integrated MMLU, C-Eval and CMMLU benchmarks in this repo. See [examples](examples/README.md) for usage.
|
| 251 |
+
|
| 252 |
+
[23/09/10] We supported **[FlashAttention-2](https://github.com/Dao-AILab/flash-attention)**. Try `flash_attn: fa2` argument to enable FlashAttention-2 if you are using RTX4090, A100 or H100 GPUs.
|
| 253 |
+
|
| 254 |
+
[23/08/12] We supported **RoPE scaling** to extend the context length of the LLaMA models. Try `rope_scaling: linear` argument in training and `rope_scaling: dynamic` argument at inference to extrapolate the position embeddings.
|
| 255 |
+
|
| 256 |
+
[23/08/11] We supported **[DPO training](https://arxiv.org/abs/2305.18290)** for instruction-tuned models. See [examples](examples/README.md) for usage.
|
| 257 |
+
|
| 258 |
+
[23/07/31] We supported **dataset streaming**. Try `streaming: true` and `max_steps: 10000` arguments to load your dataset in streaming mode.
|
| 259 |
+
|
| 260 |
+
[23/07/29] We released two instruction-tuned 13B models at Hugging Face. See these Hugging Face Repos ([LLaMA-2](https://huggingface.co/hiyouga/Llama-2-Chinese-13b-chat) / [Baichuan](https://huggingface.co/hiyouga/Baichuan-13B-sft)) for details.
|
| 261 |
+
|
| 262 |
+
[23/07/18] We developed an **all-in-one Web UI** for training, evaluation and inference. Try `train_web.py` to fine-tune models in your Web browser. Thank [@KanadeSiina](https://github.com/KanadeSiina) and [@codemayq](https://github.com/codemayq) for their efforts in the development.
|
| 263 |
+
|
| 264 |
+
[23/07/09] We released **[FastEdit](https://github.com/hiyouga/FastEdit)** ⚡🩹, an easy-to-use package for editing the factual knowledge of large language models efficiently. Please follow [FastEdit](https://github.com/hiyouga/FastEdit) if you are interested.
|
| 265 |
+
|
| 266 |
+
[23/06/29] We provided a **reproducible example** of training a chat model using instruction-following datasets, see [Baichuan-7B-sft](https://huggingface.co/hiyouga/Baichuan-7B-sft) for details.
|
| 267 |
+
|
| 268 |
+
[23/06/22] We aligned the [demo API](src/api_demo.py) with the [OpenAI's](https://platform.openai.com/docs/api-reference/chat) format where you can insert the fine-tuned model in **arbitrary ChatGPT-based applications**.
|
| 269 |
+
|
| 270 |
+
[23/06/03] We supported quantized training and inference (aka **[QLoRA](https://github.com/artidoro/qlora)**). See [examples](examples/README.md) for usage.
|
| 271 |
+
|
| 272 |
+
</details>
|
| 273 |
+
|
| 274 |
+
> [!TIP]
|
| 275 |
+
> If you cannot use the latest feature, please pull the latest code and install LLaMA-Factory again.
|
| 276 |
+
|
| 277 |
+
## Supported Models
|
| 278 |
+
|
| 279 |
+
| Model | Model size | Template |
|
| 280 |
+
| ----------------------------------------------------------------- | -------------------------------- | -------------------- |
|
| 281 |
+
| [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
|
| 282 |
+
| [DeepSeek (LLM/Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
|
| 283 |
+
| [DeepSeek 3-3.2](https://huggingface.co/deepseek-ai) | 236B/671B | deepseek3 |
|
| 284 |
+
| [DeepSeek R1 (Distill)](https://huggingface.co/deepseek-ai) | 1.5B/7B/8B/14B/32B/70B/671B | deepseekr1 |
|
| 285 |
+
| [ERNIE-4.5](https://huggingface.co/baidu) | 0.3B/21B/300B | ernie_nothink |
|
| 286 |
+
| [Falcon/Falcon H1](https://huggingface.co/tiiuae) | 0.5B/1.5B/3B/7B/11B/34B/40B/180B | falcon/falcon_h1 |
|
| 287 |
+
| [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma/gemma2 |
|
| 288 |
+
| [Gemma 3/Gemma 3n](https://huggingface.co/google) | 270M/1B/4B/6B/8B/12B/27B | gemma3/gemma3n |
|
| 289 |
+
| [GLM-4/GLM-4-0414/GLM-Z1](https://huggingface.co/zai-org) | 9B/32B | glm4/glmz1 |
|
| 290 |
+
| [GLM-4.5/GLM-4.5(6)V](https://huggingface.co/zai-org) | 9B/106B/355B | glm4_moe/glm4_5v |
|
| 291 |
+
| [GPT-2](https://huggingface.co/openai-community) | 0.1B/0.4B/0.8B/1.5B | - |
|
| 292 |
+
| [GPT-OSS](https://huggingface.co/openai) | 20B/120B | gpt_oss |
|
| 293 |
+
| [Granite 3-4](https://huggingface.co/ibm-granite) | 1B/2B/3B/7B/8B | granite3/granite4 |
|
| 294 |
+
| [Hunyuan/Hunyuan1.5 (MT)](https://huggingface.co/tencent/) | 0.5B/1.8B/4B/7B/13B | hunyuan/hunyuan_small |
|
| 295 |
+
| [InternLM 2-3](https://huggingface.co/internlm) | 7B/8B/20B | intern2 |
|
| 296 |
+
| [InternVL 2.5-3.5](https://huggingface.co/OpenGVLab) | 1B/2B/4B/8B/14B/30B/38B/78B/241B | intern_vl |
|
| 297 |
+
| [Intern-S1-mini](https://huggingface.co/internlm/) | 8B | intern_s1 |
|
| 298 |
+
| [Kimi-VL](https://huggingface.co/moonshotai) | 16B | kimi_vl |
|
| 299 |
+
| [Ling 2.0 (mini/flash)](https://huggingface.co/inclusionAI) | 16B/100B | bailing_v2 |
|
| 300 |
+
| [LFM 2.5 (VL)](https://huggingface.co/LiquidAI) | 1.2B/1.6B | lfm2/lfm2_vl |
|
| 301 |
+
| [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
|
| 302 |
+
| [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
|
| 303 |
+
| [Llama 3-3.3](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |
|
| 304 |
+
| [Llama 4](https://huggingface.co/meta-llama) | 109B/402B | llama4 |
|
| 305 |
+
| [Llama 3.2 Vision](https://huggingface.co/meta-llama) | 11B/90B | mllama |
|
| 306 |
+
| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava |
|
| 307 |
+
| [LLaVA-NeXT](https://huggingface.co/llava-hf) | 7B/8B/13B/34B/72B/110B | llava_next |
|
| 308 |
+
| [LLaVA-NeXT-Video](https://huggingface.co/llava-hf) | 7B/34B | llava_next_video |
|
| 309 |
+
| [MiMo](https://huggingface.co/XiaomiMiMo) | 7B/309B | mimo/mimo_v2 |
|
| 310 |
+
| [MiniCPM 4](https://huggingface.co/openbmb) | 0.5B/8B | cpm4 |
|
| 311 |
+
| [MiniCPM-o-2.6/MiniCPM-V-2.6](https://huggingface.co/openbmb) | 8B | minicpm_o/minicpm_v |
|
| 312 |
+
| [MiniMax-M1/MiniMax-M2](https://huggingface.co/MiniMaxAI/models) | 229B/456B | minimax1/minimax2 |
|
| 313 |
+
| [Ministral 3](https://huggingface.co/mistralai) | 3B/8B/14B | ministral3 |
|
| 314 |
+
| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
|
| 315 |
+
| [PaliGemma/PaliGemma2](https://huggingface.co/google) | 3B/10B/28B | paligemma |
|
| 316 |
+
| [Phi-3/Phi-3.5](https://huggingface.co/microsoft) | 4B/14B | phi |
|
| 317 |
+
| [Phi-3-small](https://huggingface.co/microsoft) | 7B | phi_small |
|
| 318 |
+
| [Phi-4-mini/Phi-4](https://huggingface.co/microsoft) | 3.8B/14B | phi4_mini/phi4 |
|
| 319 |
+
| [Pixtral](https://huggingface.co/mistralai) | 12B | pixtral |
|
| 320 |
+
| [Qwen2 (Code/Math/MoE/QwQ)](https://huggingface.co/Qwen) | 0.5B/1.5B/3B/7B/14B/32B/72B/110B | qwen |
|
| 321 |
+
| [Qwen3 (MoE/Instruct/Thinking/Next)](https://huggingface.co/Qwen) | 0.6B/1.7B/4B/8B/14B/32B/80B/235B | qwen3/qwen3_nothink |
|
| 322 |
+
| [Qwen2-Audio](https://huggingface.co/Qwen) | 7B | qwen2_audio |
|
| 323 |
+
| [Qwen2.5-Omni](https://huggingface.co/Qwen) | 3B/7B | qwen2_omni |
|
| 324 |
+
| [Qwen3-Omni](https://huggingface.co/Qwen) | 30B | qwen3_omni |
|
| 325 |
+
| [Qwen2-VL/Qwen2.5-VL/QVQ](https://huggingface.co/Qwen) | 2B/3B/7B/32B/72B | qwen2_vl |
|
| 326 |
+
| [Qwen3-VL](https://huggingface.co/Qwen) | 2B/4B/8B/30B/32B/235B | qwen3_vl |
|
| 327 |
+
| [Seed (OSS/Coder)](https://huggingface.co/ByteDance-Seed) | 8B/36B | seed_oss/seed_coder |
|
| 328 |
+
| [StarCoder 2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
|
| 329 |
+
| [TeleChat 2-2.5](https://huggingface.co/Tele-AI) | 3B/7B/35B/115B | telechat2 |
|
| 330 |
+
| [Yuan 2](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan |
|
| 331 |
+
|
| 332 |
+
> [!NOTE]
|
| 333 |
+
> For the "base" models, the `template` argument can be chosen from `default`, `alpaca`, `vicuna` etc. But make sure to use the **corresponding template** for the "instruct/chat" models.
|
| 334 |
+
>
|
| 335 |
+
> If the model has both reasoning and non-reasoning versions, please use the `_nothink` suffix to distinguish between them. For example, `qwen3` and `qwen3_nothink`.
|
| 336 |
+
>
|
| 337 |
+
> Remember to use the **SAME** template in training and inference.
|
| 338 |
+
>
|
| 339 |
+
> \*: You should install the `transformers` from main branch and use `DISABLE_VERSION_CHECK=1` to skip version check.
|
| 340 |
+
>
|
| 341 |
+
> \*\*: You need to install a specific version of `transformers` to use the corresponding model.
|
| 342 |
+
|
| 343 |
+
Please refer to [constants.py](src/llamafactory/extras/constants.py) for a full list of models we supported.
|
| 344 |
+
|
| 345 |
+
You also can add a custom chat template to [template.py](src/llamafactory/data/template.py).
|
| 346 |
+
|
| 347 |
+
## Supported Training Approaches
|
| 348 |
+
|
| 349 |
+
| Approach | Full-tuning | Freeze-tuning | LoRA | QLoRA | OFT | QOFT |
|
| 350 |
+
| ---------------------- | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ |
|
| 351 |
+
| Pre-Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| 352 |
+
| Supervised Fine-Tuning | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| 353 |
+
| Reward Modeling | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| 354 |
+
| PPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| 355 |
+
| DPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| 356 |
+
| KTO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| 357 |
+
| ORPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| 358 |
+
| SimPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| 359 |
+
|
| 360 |
+
> [!TIP]
|
| 361 |
+
> The implementation details of PPO can be found in [this blog](https://newfacade.github.io/notes-on-reinforcement-learning/17-ppo-trl.html).
|
| 362 |
+
|
| 363 |
+
## Provided Datasets
|
| 364 |
+
|
| 365 |
+
<details><summary>Pre-training datasets</summary>
|
| 366 |
+
|
| 367 |
+
- [Wiki Demo (en)](data/wiki_demo.txt)
|
| 368 |
+
- [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)
|
| 369 |
+
- [RedPajama V2 (en)](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-V2)
|
| 370 |
+
- [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220)
|
| 371 |
+
- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
|
| 372 |
+
- [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile)
|
| 373 |
+
- [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)
|
| 374 |
+
- [FineWeb (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb)
|
| 375 |
+
- [FineWeb-Edu (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu)
|
| 376 |
+
- [CCI3-HQ (zh)](https://huggingface.co/datasets/BAAI/CCI3-HQ)
|
| 377 |
+
- [CCI3-Data (zh)](https://huggingface.co/datasets/BAAI/CCI3-Data)
|
| 378 |
+
- [CCI4.0-M2-Base-v1 (en&zh)](https://huggingface.co/datasets/BAAI/CCI4.0-M2-Base-v1)
|
| 379 |
+
- [CCI4.0-M2-CoT-v1 (en&zh)](https://huggingface.co/datasets/BAAI/CCI4.0-M2-CoT-v1)
|
| 380 |
+
- [CCI4.0-M2-Extra-v1 (en&zh)](https://huggingface.co/datasets/BAAI/CCI4.0-M2-Extra-v1)
|
| 381 |
+
- [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)
|
| 382 |
+
- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
|
| 383 |
+
|
| 384 |
+
</details>
|
| 385 |
+
|
| 386 |
+
<details><summary>Supervised fine-tuning datasets</summary>
|
| 387 |
+
|
| 388 |
+
- [Identity (en&zh)](data/identity.json)
|
| 389 |
+
- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
|
| 390 |
+
- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca-3)
|
| 391 |
+
- [Alpaca GPT4 (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
|
| 392 |
+
- [Glaive Function Calling V2 (en&zh)](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2)
|
| 393 |
+
- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
|
| 394 |
+
- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
|
| 395 |
+
- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
|
| 396 |
+
- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
|
| 397 |
+
- [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN)
|
| 398 |
+
- [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M)
|
| 399 |
+
- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
|
| 400 |
+
- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
|
| 401 |
+
- [UltraChat (en)](https://github.com/thunlp/UltraChat)
|
| 402 |
+
- [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus)
|
| 403 |
+
- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
|
| 404 |
+
- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
|
| 405 |
+
- [OpenOrca (en)](https://huggingface.co/datasets/Open-Orca/OpenOrca)
|
| 406 |
+
- [SlimOrca (en)](https://huggingface.co/datasets/Open-Orca/SlimOrca)
|
| 407 |
+
- [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct)
|
| 408 |
+
- [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M)
|
| 409 |
+
- [Wiki QA (en)](https://huggingface.co/datasets/wiki_qa)
|
| 410 |
+
- [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa)
|
| 411 |
+
- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
|
| 412 |
+
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
| 413 |
+
- [deepctrl (en&zh)](https://www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data)
|
| 414 |
+
- [Advertise Generating (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
|
| 415 |
+
- [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)
|
| 416 |
+
- [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)
|
| 417 |
+
- [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)
|
| 418 |
+
- [Infinity Instruct (zh)](https://huggingface.co/datasets/BAAI/Infinity-Instruct)
|
| 419 |
+
- [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)
|
| 420 |
+
- [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
|
| 421 |
+
- [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)
|
| 422 |
+
- [Cosmopedia (en)](https://huggingface.co/datasets/HuggingFaceTB/cosmopedia)
|
| 423 |
+
- [STEM (zh)](https://huggingface.co/datasets/hfl/stem_zh_instruction)
|
| 424 |
+
- [Ruozhiba (zh)](https://huggingface.co/datasets/hfl/ruozhiba_gpt4_turbo)
|
| 425 |
+
- [Neo-sft (zh)](https://huggingface.co/datasets/m-a-p/neo_sft_phase2)
|
| 426 |
+
- [Magpie-Pro-300K-Filtered (en)](https://huggingface.co/datasets/Magpie-Align/Magpie-Pro-300K-Filtered)
|
| 427 |
+
- [Magpie-ultra-v0.1 (en)](https://huggingface.co/datasets/argilla/magpie-ultra-v0.1)
|
| 428 |
+
- [WebInstructSub (en)](https://huggingface.co/datasets/TIGER-Lab/WebInstructSub)
|
| 429 |
+
- [OpenO1-SFT (en&zh)](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT)
|
| 430 |
+
- [Open-Thoughts (en)](https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k)
|
| 431 |
+
- [Open-R1-Math (en)](https://huggingface.co/datasets/open-r1/OpenR1-Math-220k)
|
| 432 |
+
- [Chinese-DeepSeek-R1-Distill (zh)](https://huggingface.co/datasets/Congliu/Chinese-DeepSeek-R1-Distill-data-110k-SFT)
|
| 433 |
+
- [LLaVA mixed (en&zh)](https://huggingface.co/datasets/BUAADreamer/llava-en-zh-300k)
|
| 434 |
+
- [Pokemon-gpt4o-captions (en&zh)](https://huggingface.co/datasets/jugg1024/pokemon-gpt4o-captions)
|
| 435 |
+
- [DLR-Web (en)](https://huggingface.co/datasets/Attention1115/DLR-Web)
|
| 436 |
+
- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de)
|
| 437 |
+
- [Dolly 15k (de)](https://huggingface.co/datasets/mayflowergmbh/dolly-15k_de)
|
| 438 |
+
- [Alpaca GPT4 (de)](https://huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de)
|
| 439 |
+
- [OpenSchnabeltier (de)](https://huggingface.co/datasets/mayflowergmbh/openschnabeltier_de)
|
| 440 |
+
- [Evol Instruct (de)](https://huggingface.co/datasets/mayflowergmbh/evol-instruct_de)
|
| 441 |
+
- [Dolphin (de)](https://huggingface.co/datasets/mayflowergmbh/dolphin_de)
|
| 442 |
+
- [Booksum (de)](https://huggingface.co/datasets/mayflowergmbh/booksum_de)
|
| 443 |
+
- [Airoboros (de)](https://huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de)
|
| 444 |
+
- [Ultrachat (de)](https://huggingface.co/datasets/mayflowergmbh/ultra-chat_de)
|
| 445 |
+
|
| 446 |
+
</details>
|
| 447 |
+
|
| 448 |
+
<details><summary>Preference datasets</summary>
|
| 449 |
+
|
| 450 |
+
- [DPO mixed (en&zh)](https://huggingface.co/datasets/hiyouga/DPO-En-Zh-20k)
|
| 451 |
+
- [UltraFeedback (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized)
|
| 452 |
+
- [COIG-P (zh)](https://huggingface.co/datasets/m-a-p/COIG-P)
|
| 453 |
+
- [RLHF-V (en)](https://huggingface.co/datasets/openbmb/RLHF-V-Dataset)
|
| 454 |
+
- [VLFeedback (en)](https://huggingface.co/datasets/Zhihui/VLFeedback)
|
| 455 |
+
- [RLAIF-V (en)](https://huggingface.co/datasets/openbmb/RLAIF-V-Dataset)
|
| 456 |
+
- [Orca DPO Pairs (en)](https://huggingface.co/datasets/Intel/orca_dpo_pairs)
|
| 457 |
+
- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
|
| 458 |
+
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
| 459 |
+
- [Orca DPO (de)](https://huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de)
|
| 460 |
+
- [KTO mixed (en)](https://huggingface.co/datasets/argilla/kto-mix-15k)
|
| 461 |
+
|
| 462 |
+
</details>
|
| 463 |
+
|
| 464 |
+
Some datasets require confirmation before using them, so we recommend logging in with your Hugging Face account using these commands.
|
| 465 |
+
|
| 466 |
+
```bash
|
| 467 |
+
pip install "huggingface_hub<1.0.0"
|
| 468 |
+
huggingface-cli login
|
| 469 |
+
```
|
| 470 |
+
|
| 471 |
+
## Requirement
|
| 472 |
+
|
| 473 |
+
| Mandatory | Minimum | Recommend |
|
| 474 |
+
| ------------ | ------- | --------- |
|
| 475 |
+
| python | 3.9 | 3.10 |
|
| 476 |
+
| torch | 2.0.0 | 2.6.0 |
|
| 477 |
+
| torchvision | 0.15.0 | 0.21.0 |
|
| 478 |
+
| transformers | 4.49.0 | 4.50.0 |
|
| 479 |
+
| datasets | 2.16.0 | 3.2.0 |
|
| 480 |
+
| accelerate | 0.34.0 | 1.2.1 |
|
| 481 |
+
| peft | 0.14.0 | 0.15.1 |
|
| 482 |
+
| trl | 0.8.6 | 0.9.6 |
|
| 483 |
+
|
| 484 |
+
| Optional | Minimum | Recommend |
|
| 485 |
+
| ------------ | ------- | --------- |
|
| 486 |
+
| CUDA | 11.6 | 12.2 |
|
| 487 |
+
| deepspeed | 0.10.0 | 0.16.4 |
|
| 488 |
+
| bitsandbytes | 0.39.0 | 0.43.1 |
|
| 489 |
+
| vllm | 0.4.3 | 0.8.2 |
|
| 490 |
+
| flash-attn | 2.5.6 | 2.7.2 |
|
| 491 |
+
|
| 492 |
+
### Hardware Requirement
|
| 493 |
+
|
| 494 |
+
\* *estimated*
|
| 495 |
+
|
| 496 |
+
| Method | Bits | 7B | 14B | 30B | 70B | `x`B |
|
| 497 |
+
| ----------------------------------- | ---- | ----- | ----- | ----- | ------ | ------- |
|
| 498 |
+
| Full (`bf16` or `fp16`) | 32 | 120GB | 240GB | 600GB | 1200GB | `18x`GB |
|
| 499 |
+
| Full (`pure_bf16`) | 16 | 60GB | 120GB | 300GB | 600GB | `8x`GB |
|
| 500 |
+
| Freeze/LoRA/GaLore/APOLLO/BAdam/OFT | 16 | 16GB | 32GB | 64GB | 160GB | `2x`GB |
|
| 501 |
+
| QLoRA / QOFT | 8 | 10GB | 20GB | 40GB | 80GB | `x`GB |
|
| 502 |
+
| QLoRA / QOFT | 4 | 6GB | 12GB | 24GB | 48GB | `x/2`GB |
|
| 503 |
+
| QLoRA / QOFT | 2 | 4GB | 8GB | 16GB | 24GB | `x/4`GB |
|
| 504 |
+
|
| 505 |
+
## Getting Started
|
| 506 |
+
|
| 507 |
+
### Installation
|
| 508 |
+
|
| 509 |
+
> [!IMPORTANT]
|
| 510 |
+
> Installation is mandatory.
|
| 511 |
+
|
| 512 |
+
#### Install from Source
|
| 513 |
+
|
| 514 |
+
```bash
|
| 515 |
+
git clone --depth 1 https://github.com/hiyouga/LlamaFactory.git
|
| 516 |
+
cd LlamaFactory
|
| 517 |
+
pip install -e .
|
| 518 |
+
pip install -r requirements/metrics.txt
|
| 519 |
+
```
|
| 520 |
+
|
| 521 |
+
Optional dependencies available: `metrics`, `deepspeed`. Install with: `pip install -e . && pip install -r requirements/metrics.txt -r requirements/deepspeed.txt`
|
| 522 |
+
|
| 523 |
+
Additional dependencies for specific features are available in `examples/requirements/`.
|
| 524 |
+
|
| 525 |
+
#### Install from Docker Image
|
| 526 |
+
|
| 527 |
+
```bash
|
| 528 |
+
docker run -it --rm --gpus=all --ipc=host hiyouga/llamafactory:latest
|
| 529 |
+
```
|
| 530 |
+
|
| 531 |
+
This image is built on Ubuntu 22.04 (x86\_64), CUDA 12.4, Python 3.11, PyTorch 2.6.0, and Flash-attn 2.7.4.
|
| 532 |
+
|
| 533 |
+
Find the pre-built images: https://hub.docker.com/r/hiyouga/llamafactory/tags
|
| 534 |
+
|
| 535 |
+
Please refer to [build docker](#build-docker) to build the image yourself.
|
| 536 |
+
|
| 537 |
+
<details><summary>Setting up a virtual environment with <b>uv</b></summary>
|
| 538 |
+
|
| 539 |
+
Create an isolated Python environment with [uv](https://github.com/astral-sh/uv):
|
| 540 |
+
|
| 541 |
+
```bash
|
| 542 |
+
uv run llamafactory-cli webui
|
| 543 |
+
```
|
| 544 |
+
|
| 545 |
+
</details>
|
| 546 |
+
|
| 547 |
+
<details><summary>For Windows users</summary>
|
| 548 |
+
|
| 549 |
+
#### Install PyTorch
|
| 550 |
+
|
| 551 |
+
You need to manually install the GPU version of PyTorch on the Windows platform. Please refer to the [official website](https://pytorch.org/get-started/locally/) and the following command to install PyTorch with CUDA support:
|
| 552 |
+
|
| 553 |
+
```bash
|
| 554 |
+
pip uninstall torch torchvision torchaudio
|
| 555 |
+
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu126
|
| 556 |
+
python -c "import torch; print(torch.cuda.is_available())"
|
| 557 |
+
```
|
| 558 |
+
|
| 559 |
+
If you see `True` then you have successfully installed PyTorch with CUDA support.
|
| 560 |
+
|
| 561 |
+
Try `dataloader_num_workers: 0` if you encounter `Can't pickle local object` error.
|
| 562 |
+
|
| 563 |
+
#### Install BitsAndBytes
|
| 564 |
+
|
| 565 |
+
If you want to enable the quantized LoRA (QLoRA) on the Windows platform, you need to install a pre-built version of `bitsandbytes` library, which supports CUDA 11.1 to 12.2, please select the appropriate [release version](https://github.com/jllllll/bitsandbytes-windows-webui/releases/tag/wheels) based on your CUDA version.
|
| 566 |
+
|
| 567 |
+
```bash
|
| 568 |
+
pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.2.post2-py3-none-win_amd64.whl
|
| 569 |
+
```
|
| 570 |
+
|
| 571 |
+
#### Install Flash Attention-2
|
| 572 |
+
|
| 573 |
+
To enable FlashAttention-2 on the Windows platform, please use the script from [flash-attention-windows-wheel](https://huggingface.co/lldacing/flash-attention-windows-wheel) to compile and install it by yourself.
|
| 574 |
+
|
| 575 |
+
</details>
|
| 576 |
+
|
| 577 |
+
<details><summary>For Ascend NPU users</summary>
|
| 578 |
+
|
| 579 |
+
To install LLaMA Factory on Ascend NPU devices, please upgrade Python to version 3.10 or higher: `pip install -r requirements/npu.txt`. Additionally, you need to install the **Ascend CANN Toolkit and Kernels**. Please follow the [installation tutorial](https://llamafactory.readthedocs.io/en/latest/advanced/npu_installation.html).
|
| 580 |
+
|
| 581 |
+
|
| 582 |
+
You can also download the pre-built Docker images:
|
| 583 |
+
|
| 584 |
+
```bash
|
| 585 |
+
# Docker Hub
|
| 586 |
+
docker pull hiyouga/llamafactory:latest-npu-a2
|
| 587 |
+
docker pull hiyouga/llamafactory:latest-npu-a3
|
| 588 |
+
|
| 589 |
+
# quay.io
|
| 590 |
+
docker pull quay.io/ascend/llamafactory:latest-npu-a2
|
| 591 |
+
docker pull quay.io/ascend/llamafactory:latest-npu-a3
|
| 592 |
+
```
|
| 593 |
+
|
| 594 |
+
#### Install BitsAndBytes
|
| 595 |
+
|
| 596 |
+
To use QLoRA based on bitsandbytes on Ascend NPU, please follow these 3 steps:
|
| 597 |
+
|
| 598 |
+
1. Manually compile bitsandbytes: Refer to [the installation documentation](https://huggingface.co/docs/bitsandbytes/installation?backend=Ascend+NPU&platform=Ascend+NPU) for the NPU version of bitsandbytes to complete the compilation and installation. The compilation requires a cmake version of at least 3.22.1 and a g++ version of at least 12.x.
|
| 599 |
+
|
| 600 |
+
```bash
|
| 601 |
+
# Install bitsandbytes from source
|
| 602 |
+
# Clone bitsandbytes repo, Ascend NPU backend is currently enabled on multi-backend-refactor branch
|
| 603 |
+
git clone -b multi-backend-refactor https://github.com/bitsandbytes-foundation/bitsandbytes.git
|
| 604 |
+
cd bitsandbytes/
|
| 605 |
+
|
| 606 |
+
# Install dependencies
|
| 607 |
+
pip install -r requirements-dev.txt
|
| 608 |
+
|
| 609 |
+
# Install the dependencies for the compilation tools. Note that the commands for this step may vary depending on the operating system. The following are provided for reference
|
| 610 |
+
apt-get install -y build-essential cmake
|
| 611 |
+
|
| 612 |
+
# Compile & install
|
| 613 |
+
cmake -DCOMPUTE_BACKEND=npu -S .
|
| 614 |
+
make
|
| 615 |
+
pip install .
|
| 616 |
+
```
|
| 617 |
+
|
| 618 |
+
2. Install transformers from the main branch.
|
| 619 |
+
|
| 620 |
+
```bash
|
| 621 |
+
git clone -b main https://github.com/huggingface/transformers.git
|
| 622 |
+
cd transformers
|
| 623 |
+
pip install .
|
| 624 |
+
```
|
| 625 |
+
|
| 626 |
+
3. Set `double_quantization: false` in the configuration. You can refer to the [example](examples/train_qlora/qwen3_lora_sft_bnb_npu.yaml).
|
| 627 |
+
|
| 628 |
+
</details>
|
| 629 |
+
|
| 630 |
+
### Data Preparation
|
| 631 |
+
|
| 632 |
+
Please refer to [data/README.md](data/README.md) for checking the details about the format of dataset files. You can use datasets on HuggingFace / ModelScope / Modelers hub, load the dataset in local disk, or specify a path to s3/gcs cloud storage.
|
| 633 |
+
|
| 634 |
+
> [!NOTE]
|
| 635 |
+
> Please update `data/dataset_info.json` to use your custom dataset.
|
| 636 |
+
|
| 637 |
+
You can also use **[Easy Dataset](https://github.com/ConardLi/easy-dataset)**, **[DataFlow](https://github.com/OpenDCAI/DataFlow)** and **[GraphGen](https://github.com/open-sciencelab/GraphGen)** to create synthetic data for fine-tuning.
|
| 638 |
+
|
| 639 |
+
### Quickstart
|
| 640 |
+
|
| 641 |
+
Use the following 3 commands to run LoRA **fine-tuning**, **inference** and **merging** of the Qwen3-4B-Instruct model, respectively.
|
| 642 |
+
|
| 643 |
+
```bash
|
| 644 |
+
llamafactory-cli train examples/train_lora/qwen3_lora_sft.yaml
|
| 645 |
+
llamafactory-cli chat examples/inference/qwen3_lora_sft.yaml
|
| 646 |
+
llamafactory-cli export examples/merge_lora/qwen3_lora_sft.yaml
|
| 647 |
+
```
|
| 648 |
+
|
| 649 |
+
See [examples/README.md](examples/README.md) for advanced usage (including distributed training).
|
| 650 |
+
|
| 651 |
+
> [!TIP]
|
| 652 |
+
> Use `llamafactory-cli help` to show help information.
|
| 653 |
+
>
|
| 654 |
+
> Read [FAQs](https://github.com/hiyouga/LLaMA-Factory/issues/4614) first if you encounter any problems.
|
| 655 |
+
|
| 656 |
+
### Fine-Tuning with LLaMA Board GUI (powered by [Gradio](https://github.com/gradio-app/gradio))
|
| 657 |
+
|
| 658 |
+
```bash
|
| 659 |
+
llamafactory-cli webui
|
| 660 |
+
```
|
| 661 |
+
|
| 662 |
+
### LLaMA Factory Online
|
| 663 |
+
|
| 664 |
+
Read our [documentation](https://docs.llamafactory.com.cn/docs/documents/quickstart/getstarted/?utm_source=LLaMA-Factory).
|
| 665 |
+
|
| 666 |
+
### Build Docker
|
| 667 |
+
|
| 668 |
+
For CUDA users:
|
| 669 |
+
|
| 670 |
+
```bash
|
| 671 |
+
cd docker/docker-cuda/
|
| 672 |
+
docker compose up -d
|
| 673 |
+
docker compose exec llamafactory bash
|
| 674 |
+
```
|
| 675 |
+
|
| 676 |
+
For Ascend NPU users:
|
| 677 |
+
|
| 678 |
+
```bash
|
| 679 |
+
cd docker/docker-npu/
|
| 680 |
+
docker compose up -d
|
| 681 |
+
docker compose exec llamafactory bash
|
| 682 |
+
```
|
| 683 |
+
|
| 684 |
+
For AMD ROCm users:
|
| 685 |
+
|
| 686 |
+
```bash
|
| 687 |
+
cd docker/docker-rocm/
|
| 688 |
+
docker compose up -d
|
| 689 |
+
docker compose exec llamafactory bash
|
| 690 |
+
```
|
| 691 |
+
|
| 692 |
+
<details><summary>Build without Docker Compose</summary>
|
| 693 |
+
|
| 694 |
+
For CUDA users:
|
| 695 |
+
|
| 696 |
+
```bash
|
| 697 |
+
docker build -f ./docker/docker-cuda/Dockerfile \
|
| 698 |
+
--build-arg PIP_INDEX=https://pypi.org/simple \
|
| 699 |
+
-t llamafactory:latest .
|
| 700 |
+
|
| 701 |
+
docker run -dit --ipc=host --gpus=all \
|
| 702 |
+
-p 7860:7860 \
|
| 703 |
+
-p 8000:8000 \
|
| 704 |
+
--name llamafactory \
|
| 705 |
+
llamafactory:latest
|
| 706 |
+
|
| 707 |
+
docker exec -it llamafactory bash
|
| 708 |
+
```
|
| 709 |
+
|
| 710 |
+
For Ascend NPU users:
|
| 711 |
+
|
| 712 |
+
```bash
|
| 713 |
+
docker build -f ./docker/docker-npu/Dockerfile \
|
| 714 |
+
--build-arg PIP_INDEX=https://pypi.org/simple \
|
| 715 |
+
-t llamafactory:latest .
|
| 716 |
+
|
| 717 |
+
docker run -dit --ipc=host \
|
| 718 |
+
-v /usr/local/dcmi:/usr/local/dcmi \
|
| 719 |
+
-v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
|
| 720 |
+
-v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
|
| 721 |
+
-v /etc/ascend_install.info:/etc/ascend_install.info \
|
| 722 |
+
-p 7860:7860 \
|
| 723 |
+
-p 8000:8000 \
|
| 724 |
+
--device /dev/davinci0 \
|
| 725 |
+
--device /dev/davinci_manager \
|
| 726 |
+
--device /dev/devmm_svm \
|
| 727 |
+
--device /dev/hisi_hdc \
|
| 728 |
+
--name llamafactory \
|
| 729 |
+
llamafactory:latest
|
| 730 |
+
|
| 731 |
+
docker exec -it llamafactory bash
|
| 732 |
+
```
|
| 733 |
+
|
| 734 |
+
For AMD ROCm users:
|
| 735 |
+
|
| 736 |
+
```bash
|
| 737 |
+
docker build -f ./docker/docker-rocm/Dockerfile \
|
| 738 |
+
--build-arg PIP_INDEX=https://pypi.org/simple \
|
| 739 |
+
-t llamafactory:latest .
|
| 740 |
+
|
| 741 |
+
docker run -dit --ipc=host \
|
| 742 |
+
-p 7860:7860 \
|
| 743 |
+
-p 8000:8000 \
|
| 744 |
+
--device /dev/kfd \
|
| 745 |
+
--device /dev/dri \
|
| 746 |
+
--name llamafactory \
|
| 747 |
+
llamafactory:latest
|
| 748 |
+
|
| 749 |
+
docker exec -it llamafactory bash
|
| 750 |
+
```
|
| 751 |
+
|
| 752 |
+
</details>
|
| 753 |
+
|
| 754 |
+
<details><summary>Use Docker volumes</summary>
|
| 755 |
+
|
| 756 |
+
You can uncomment `VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]` in the Dockerfile to use data volumes.
|
| 757 |
+
|
| 758 |
+
When building the Docker image, use `-v ./hf_cache:/root/.cache/huggingface` argument to mount the local directory to the container. The following data volumes are available.
|
| 759 |
+
|
| 760 |
+
- `hf_cache`: Utilize Hugging Face cache on the host machine.
|
| 761 |
+
- `shared_data`: The directionary to store datasets on the host machine.
|
| 762 |
+
- `output`: Set export dir to this location so that the merged result can be accessed directly on the host machine.
|
| 763 |
+
|
| 764 |
+
</details>
|
| 765 |
+
|
| 766 |
+
### Deploy with OpenAI-style API and vLLM
|
| 767 |
+
|
| 768 |
+
```bash
|
| 769 |
+
API_PORT=8000 llamafactory-cli api examples/inference/qwen3.yaml infer_backend=vllm vllm_enforce_eager=true
|
| 770 |
+
```
|
| 771 |
+
|
| 772 |
+
> [!TIP]
|
| 773 |
+
> Visit [this page](https://platform.openai.com/docs/api-reference/chat/create) for API document.
|
| 774 |
+
>
|
| 775 |
+
> Examples: [Image understanding](scripts/api_example/test_image.py) | [Function calling](scripts/api_example/test_toolcall.py)
|
| 776 |
+
|
| 777 |
+
### Download from ModelScope Hub
|
| 778 |
+
|
| 779 |
+
If you have trouble with downloading models and datasets from Hugging Face, you can use ModelScope.
|
| 780 |
+
|
| 781 |
+
```bash
|
| 782 |
+
export USE_MODELSCOPE_HUB=1 # `set USE_MODELSCOPE_HUB=1` for Windows
|
| 783 |
+
```
|
| 784 |
+
|
| 785 |
+
Train the model by specifying a model ID of the ModelScope Hub as the `model_name_or_path`. You can find a full list of model IDs at [ModelScope Hub](https://modelscope.cn/models), e.g., `LLM-Research/Meta-Llama-3-8B-Instruct`.
|
| 786 |
+
|
| 787 |
+
### Download from Modelers Hub
|
| 788 |
+
|
| 789 |
+
You can also use Modelers Hub to download models and datasets.
|
| 790 |
+
|
| 791 |
+
```bash
|
| 792 |
+
export USE_OPENMIND_HUB=1 # `set USE_OPENMIND_HUB=1` for Windows
|
| 793 |
+
```
|
| 794 |
+
|
| 795 |
+
Train the model by specifying a model ID of the Modelers Hub as the `model_name_or_path`. You can find a full list of model IDs at [Modelers Hub](https://modelers.cn/models), e.g., `TeleAI/TeleChat-7B-pt`.
|
| 796 |
+
|
| 797 |
+
### Use W&B Logger
|
| 798 |
+
|
| 799 |
+
To use [Weights & Biases](https://wandb.ai) for logging experimental results, you need to add the following arguments to yaml files.
|
| 800 |
+
|
| 801 |
+
```yaml
|
| 802 |
+
report_to: wandb
|
| 803 |
+
run_name: test_run # optional
|
| 804 |
+
```
|
| 805 |
+
|
| 806 |
+
Set `WANDB_API_KEY` to [your key](https://wandb.ai/authorize) when launching training tasks to log in with your W&B account.
|
| 807 |
+
|
| 808 |
+
### Use SwanLab Logger
|
| 809 |
+
|
| 810 |
+
To use [SwanLab](https://github.com/SwanHubX/SwanLab) for logging experimental results, you need to add the following arguments to yaml files.
|
| 811 |
+
|
| 812 |
+
```yaml
|
| 813 |
+
use_swanlab: true
|
| 814 |
+
swanlab_run_name: test_run # optional
|
| 815 |
+
```
|
| 816 |
+
|
| 817 |
+
When launching training tasks, you can log in to SwanLab in three ways:
|
| 818 |
+
|
| 819 |
+
1. Add `swanlab_api_key=<your_api_key>` to the yaml file, and set it to your [API key](https://swanlab.cn/settings).
|
| 820 |
+
2. Set the environment variable `SWANLAB_API_KEY` to your [API key](https://swanlab.cn/settings).
|
| 821 |
+
3. Use the `swanlab login` command to complete the login.
|
| 822 |
+
|
| 823 |
+
## Projects using LLaMA Factory
|
| 824 |
+
|
| 825 |
+
If you have a project that should be incorporated, please contact via email or create a pull request.
|
| 826 |
+
|
| 827 |
+
<details><summary>Click to show</summary>
|
| 828 |
+
|
| 829 |
+
1. Wang et al. ESRL: Efficient Sampling-based Reinforcement Learning for Sequence Generation. 2023. [[arxiv]](https://arxiv.org/abs/2308.02223)
|
| 830 |
+
1. Yu et al. Open, Closed, or Small Language Models for Text Classification? 2023. [[arxiv]](https://arxiv.org/abs/2308.10092)
|
| 831 |
+
1. Wang et al. UbiPhysio: Support Daily Functioning, Fitness, and Rehabilitation with Action Understanding and Feedback in Natural Language. 2023. [[arxiv]](https://arxiv.org/abs/2308.10526)
|
| 832 |
+
1. Luceri et al. Leveraging Large Language Models to Detect Influence Campaigns in Social Media. 2023. [[arxiv]](https://arxiv.org/abs/2311.07816)
|
| 833 |
+
1. Zhang et al. Alleviating Hallucinations of Large Language Models through Induced Hallucinations. 2023. [[arxiv]](https://arxiv.org/abs/2312.15710)
|
| 834 |
+
1. Wang et al. Know Your Needs Better: Towards Structured Understanding of Marketer Demands with Analogical Reasoning Augmented LLMs. KDD 2024. [[arxiv]](https://arxiv.org/abs/2401.04319)
|
| 835 |
+
1. Wang et al. CANDLE: Iterative Conceptualization and Instantiation Distillation from Large Language Models for Commonsense Reasoning. ACL 2024. [[arxiv]](https://arxiv.org/abs/2401.07286)
|
| 836 |
+
1. Choi et al. FACT-GPT: Fact-Checking Augmentation via Claim Matching with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2402.05904)
|
| 837 |
+
1. Zhang et al. AutoMathText: Autonomous Data Selection with Language Models for Mathematical Texts. 2024. [[arxiv]](https://arxiv.org/abs/2402.07625)
|
| 838 |
+
1. Lyu et al. KnowTuning: Knowledge-aware Fine-tuning for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11176)
|
| 839 |
+
1. Yang et al. LaCo: Large Language Model Pruning via Layer Collaps. 2024. [[arxiv]](https://arxiv.org/abs/2402.11187)
|
| 840 |
+
1. Bhardwaj et al. Language Models are Homer Simpson! Safety Re-Alignment of Fine-tuned Language Models through Task Arithmetic. 2024. [[arxiv]](https://arxiv.org/abs/2402.11746)
|
| 841 |
+
1. Yang et al. Enhancing Empathetic Response Generation by Augmenting LLMs with Small-scale Empathetic Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11801)
|
| 842 |
+
1. Yi et al. Generation Meets Verification: Accelerating Large Language Model Inference with Smart Parallel Auto-Correct Decoding. ACL 2024 Findings. [[arxiv]](https://arxiv.org/abs/2402.11809)
|
| 843 |
+
1. Cao et al. Head-wise Shareable Attention for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11819)
|
| 844 |
+
1. Zhang et al. Enhancing Multilingual Capabilities of Large Language Models through Self-Distillation from Resource-Rich Languages. 2024. [[arxiv]](https://arxiv.org/abs/2402.12204)
|
| 845 |
+
1. Kim et al. Efficient and Effective Vocabulary Expansion Towards Multilingual Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.14714)
|
| 846 |
+
1. Yu et al. KIEval: A Knowledge-grounded Interactive Evaluation Framework for Large Language Models. ACL 2024. [[arxiv]](https://arxiv.org/abs/2402.15043)
|
| 847 |
+
1. Huang et al. Key-Point-Driven Data Synthesis with its Enhancement on Mathematical Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2403.02333)
|
| 848 |
+
1. Duan et al. Negating Negatives: Alignment without Human Positive Samples via Distributional Dispreference Optimization. 2024. [[arxiv]](https://arxiv.org/abs/2403.03419)
|
| 849 |
+
1. Xie and Schwertfeger. Empowering Robotics with Large Language Models: osmAG Map Comprehension with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2403.08228)
|
| 850 |
+
1. Wu et al. Large Language Models are Parallel Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2403.09073)
|
| 851 |
+
1. Zhang et al. EDT: Improving Large Language Models' Generation by Entropy-based Dynamic Temperature Sampling. 2024. [[arxiv]](https://arxiv.org/abs/2403.14541)
|
| 852 |
+
1. Weller et al. FollowIR: Evaluating and Teaching Information Retrieval Models to Follow Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2403.15246)
|
| 853 |
+
1. Hongbin Na. CBT-LLM: A Chinese Large Language Model for Cognitive Behavioral Therapy-based Mental Health Question Answering. COLING 2024. [[arxiv]](https://arxiv.org/abs/2403.16008)
|
| 854 |
+
1. Zan et al. CodeS: Natural Language to Code Repository via Multi-Layer Sketch. 2024. [[arxiv]](https://arxiv.org/abs/2403.16443)
|
| 855 |
+
1. Liu et al. Extensive Self-Contrast Enables Feedback-Free Language Model Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2404.00604)
|
| 856 |
+
1. Luo et al. BAdam: A Memory Efficient Full Parameter Training Method for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.02827)
|
| 857 |
+
1. Du et al. Chinese Tiny LLM: Pretraining a Chinese-Centric Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2404.04167)
|
| 858 |
+
1. Ma et al. Parameter Efficient Quasi-Orthogonal Fine-Tuning via Givens Rotation. ICML 2024. [[arxiv]](https://arxiv.org/abs/2404.04316)
|
| 859 |
+
1. Liu et al. Dynamic Generation of Personalities with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.07084)
|
| 860 |
+
1. Shang et al. How Far Have We Gone in Stripped Binary Code Understanding Using Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.09836)
|
| 861 |
+
1. Huang et al. LLMTune: Accelerate Database Knob Tuning with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.11581)
|
| 862 |
+
1. Deng et al. Text-Tuple-Table: Towards Information Integration in Text-to-Table Generation via Global Tuple Extraction. 2024. [[arxiv]](https://arxiv.org/abs/2404.14215)
|
| 863 |
+
1. Acikgoz et al. Hippocrates: An Open-Source Framework for Advancing Large Language Models in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2404.16621)
|
| 864 |
+
1. Zhang et al. Small Language Models Need Strong Verifiers to Self-Correct Reasoning. ACL 2024 Findings. [[arxiv]](https://arxiv.org/abs/2404.17140)
|
| 865 |
+
1. Zhou et al. FREB-TQA: A Fine-Grained Robustness Evaluation Benchmark for Table Question Answering. NAACL 2024. [[arxiv]](https://arxiv.org/abs/2404.18585)
|
| 866 |
+
1. Xu et al. Large Language Models for Cyber Security: A Systematic Literature Review. 2024. [[arxiv]](https://arxiv.org/abs/2405.04760)
|
| 867 |
+
1. Dammu et al. "They are uncultured": Unveiling Covert Harms and Social Threats in LLM Generated Conversations. 2024. [[arxiv]](https://arxiv.org/abs/2405.05378)
|
| 868 |
+
1. Yi et al. A safety realignment framework via subspace-oriented model fusion for large language models. 2024. [[arxiv]](https://arxiv.org/abs/2405.09055)
|
| 869 |
+
1. Lou et al. SPO: Multi-Dimensional Preference Sequential Alignment With Implicit Reward Modeling. 2024. [[arxiv]](https://arxiv.org/abs/2405.12739)
|
| 870 |
+
1. Zhang et al. Getting More from Less: Large Language Models are Good Spontaneous Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2405.13816)
|
| 871 |
+
1. Zhang et al. TS-Align: A Teacher-Student Collaborative Framework for Scalable Iterative Finetuning of Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2405.20215)
|
| 872 |
+
1. Zihong Chen. Sentence Segmentation and Sentence Punctuation Based on XunziALLM. 2024. [[paper]](https://aclanthology.org/2024.lt4hala-1.30)
|
| 873 |
+
1. Gao et al. The Best of Both Worlds: Toward an Honest and Helpful Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2406.00380)
|
| 874 |
+
1. Wang and Song. MARS: Benchmarking the Metaphysical Reasoning Abilities of Language Models with a Multi-task Evaluation Dataset. 2024. [[arxiv]](https://arxiv.org/abs/2406.02106)
|
| 875 |
+
1. Hu et al. Computational Limits of Low-Rank Adaptation (LoRA) for Transformer-Based Models. 2024. [[arxiv]](https://arxiv.org/abs/2406.03136)
|
| 876 |
+
1. Ge et al. Time Sensitive Knowledge Editing through Efficient Finetuning. ACL 2024. [[arxiv]](https://arxiv.org/abs/2406.04496)
|
| 877 |
+
1. Tan et al. Peer Review as A Multi-Turn and Long-Context Dialogue with Role-Based Interactions. 2024. [[arxiv]](https://arxiv.org/abs/2406.05688)
|
| 878 |
+
1. Song et al. Turbo Sparse: Achieving LLM SOTA Performance with Minimal Activated Parameters. 2024. [[arxiv]](https://arxiv.org/abs/2406.05955)
|
| 879 |
+
1. Gu et al. RWKV-CLIP: A Robust Vision-Language Representation Learner. 2024. [[arxiv]](https://arxiv.org/abs/2406.06973)
|
| 880 |
+
1. Chen et al. Advancing Tool-Augmented Large Language Models: Integrating Insights from Errors in Inference Trees. 2024. [[arxiv]](https://arxiv.org/abs/2406.07115)
|
| 881 |
+
1. Zhu et al. Are Large Language Models Good Statisticians?. 2024. [[arxiv]](https://arxiv.org/abs/2406.07815)
|
| 882 |
+
1. Li et al. Know the Unknown: An Uncertainty-Sensitive Method for LLM Instruction Tuning. 2024. [[arxiv]](https://arxiv.org/abs/2406.10099)
|
| 883 |
+
1. Ding et al. IntentionQA: A Benchmark for Evaluating Purchase Intention Comprehension Abilities of Language Models in E-commerce. 2024. [[arxiv]](https://arxiv.org/abs/2406.10173)
|
| 884 |
+
1. He et al. COMMUNITY-CROSS-INSTRUCT: Unsupervised Instruction Generation for Aligning Large Language Models to Online Communities. 2024. [[arxiv]](https://arxiv.org/abs/2406.12074)
|
| 885 |
+
1. Lin et al. FVEL: Interactive Formal Verification Environment with Large Language Models via Theorem Proving. 2024. [[arxiv]](https://arxiv.org/abs/2406.14408)
|
| 886 |
+
1. Treutlein et al. Connecting the Dots: LLMs can Infer and Verbalize Latent Structure from Disparate Training Data. 2024. [[arxiv]](https://arxiv.org/abs/2406.14546)
|
| 887 |
+
1. Feng et al. SS-Bench: A Benchmark for Social Story Generation and Evaluation. 2024. [[arxiv]](https://arxiv.org/abs/2406.15695)
|
| 888 |
+
1. Feng et al. Self-Constructed Context Decompilation with Fined-grained Alignment Enhancement. 2024. [[arxiv]](https://arxiv.org/abs/2406.17233)
|
| 889 |
+
1. Liu et al. Large Language Models for Cuffless Blood Pressure Measurement From Wearable Biosignals. 2024. [[arxiv]](https://arxiv.org/abs/2406.18069)
|
| 890 |
+
1. Iyer et al. Exploring Very Low-Resource Translation with LLMs: The University of Edinburgh's Submission to AmericasNLP 2024 Translation Task. AmericasNLP 2024. [[paper]](https://aclanthology.org/2024.americasnlp-1.25)
|
| 891 |
+
1. Li et al. Calibrating LLMs with Preference Optimization on Thought Trees for Generating Rationale in Science Question Scoring. 2024. [[arxiv]](https://arxiv.org/abs/2406.19949)
|
| 892 |
+
1. Yang et al. Financial Knowledge Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2407.00365)
|
| 893 |
+
1. Lin et al. DogeRM: Equipping Reward Models with Domain Knowledge through Model Merging. 2024. [[arxiv]](https://arxiv.org/abs/2407.01470)
|
| 894 |
+
1. Bako et al. Evaluating the Semantic Profiling Abilities of LLMs for Natural Language Utterances in Data Visualization. 2024. [[arxiv]](https://arxiv.org/abs/2407.06129)
|
| 895 |
+
1. Huang et al. RoLoRA: Fine-tuning Rotated Outlier-free LLMs for Effective Weight-Activation Quantization. 2024. [[arxiv]](https://arxiv.org/abs/2407.08044)
|
| 896 |
+
1. Jiang et al. LLM-Collaboration on Automatic Science Journalism for the General Audience. 2024. [[arxiv]](https://arxiv.org/abs/2407.09756)
|
| 897 |
+
1. Inouye et al. Applied Auto-tuning on LoRA Hyperparameters. 2024. [[paper]](https://scholarcommons.scu.edu/cseng_senior/272/)
|
| 898 |
+
1. Qi et al. Research on Tibetan Tourism Viewpoints information generation system based on LLM. 2024. [[arxiv]](https://arxiv.org/abs/2407.13561)
|
| 899 |
+
1. Xu et al. Course-Correction: Safety Alignment Using Synthetic Preferences. 2024. [[arxiv]](https://arxiv.org/abs/2407.16637)
|
| 900 |
+
1. Sun et al. LAMBDA: A Large Model Based Data Agent. 2024. [[arxiv]](https://arxiv.org/abs/2407.17535)
|
| 901 |
+
1. Zhu et al. CollectiveSFT: Scaling Large Language Models for Chinese Medical Benchmark with Collective Instructions in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2407.19705)
|
| 902 |
+
1. Yu et al. Correcting Negative Bias in Large Language Models through Negative Attention Score Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2408.00137)
|
| 903 |
+
1. Xie et al. The Power of Personalized Datasets: Advancing Chinese Composition Writing for Elementary School through Targeted Model Fine-Tuning. IALP 2024. [[paper]](https://www.asianlp.sg/conferences/ialp2024/proceedings/papers/IALP2024_P055.pdf)
|
| 904 |
+
1. Liu et al. Instruct-Code-Llama: Improving Capabilities of Language Model in Competition Level Code Generation by Online Judge Feedback. ICIC 2024. [[paper]](https://link.springer.com/chapter/10.1007/978-981-97-5669-8_11)
|
| 905 |
+
1. Wang et al. Cybernetic Sentinels: Unveiling the Impact of Safety Data Selection on Model Security in Supervised Fine-Tuning. ICIC 2024. [[paper]](https://link.springer.com/chapter/10.1007/978-981-97-5669-8_23)
|
| 906 |
+
1. Xia et al. Understanding the Performance and Estimating the Cost of LLM Fine-Tuning. 2024. [[arxiv]](https://arxiv.org/abs/2408.04693)
|
| 907 |
+
1. Zeng et al. Perceive, Reflect, and Plan: Designing LLM Agent for Goal-Directed City Navigation without Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2408.04168)
|
| 908 |
+
1. Xia et al. Using Pre-trained Language Model for Accurate ESG Prediction. FinNLP 2024. [[paper]](https://aclanthology.org/2024.finnlp-2.1/)
|
| 909 |
+
1. Liang et al. I-SHEEP: Self-Alignment of LLM from Scratch through an Iterative Self-Enhancement Paradigm. 2024. [[arxiv]](https://arxiv.org/abs/2408.08072)
|
| 910 |
+
1. Bai et al. Aligning Large Language Model with Direct Multi-Preference Optimization for Recommendation. CIKM 2024. [[paper]](https://dl.acm.org/doi/10.1145/3627673.3679611)
|
| 911 |
+
1. Zhang et al. CPsyCoun: A Report-based Multi-turn Dialogue Reconstruction and Evaluation Framework for Chinese Psychological Counseling. ACL 2024. [[paper]](https://aclanthology.org/2024.findings-acl.830.pdf)
|
| 912 |
+
1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: A large language model for Astronomy, based on ChatGLM2-6B and Qwen-14B.
|
| 913 |
+
1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: A large language model specialized in Chinese legal domain, based on Baichuan-13B, is capable of retrieving and reasoning on legal knowledge.
|
| 914 |
+
1. **[Sunsimiao](https://github.com/X-D-Lab/Sunsimiao)**: A large language model specialized in Chinese medical domain, based on Baichuan-7B and ChatGLM-6B.
|
| 915 |
+
1. **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: A series of large language models for Chinese medical domain, based on LLaMA2-7B and Baichuan-13B.
|
| 916 |
+
1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**: A series of MBTI Personality large language models, capable of giving any LLM 16 different personality types based on different datasets and training methods.
|
| 917 |
+
1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**: A large language model specialized in generate metadata for stable diffusion. [[demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt)
|
| 918 |
+
1. **[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**: A multimodal large language model specialized in Chinese medical domain, based on LLaVA-1.5-7B.
|
| 919 |
+
1. **[AutoRE](https://github.com/THUDM/AutoRE)**: A document-level relation extraction system based on large language models.
|
| 920 |
+
1. **[NVIDIA RTX AI Toolkit](https://github.com/NVIDIA/RTX-AI-Toolkit)**: SDKs for fine-tuning LLMs on Windows PC for NVIDIA RTX.
|
| 921 |
+
1. **[LazyLLM](https://github.com/LazyAGI/LazyLLM)**: An easy and lazy way for building multi-agent LLMs applications and supports model fine-tuning via LLaMA Factory.
|
| 922 |
+
1. **[RAG-Retrieval](https://github.com/NLPJCL/RAG-Retrieval)**: A full pipeline for RAG retrieval model fine-tuning, inference, and distillation. [[blog]](https://zhuanlan.zhihu.com/p/987727357)
|
| 923 |
+
1. **[360-LLaMA-Factory](https://github.com/Qihoo360/360-LLaMA-Factory)**: A modified library that supports long sequence SFT & DPO using ring attention.
|
| 924 |
+
1. **[Sky-T1](https://novasky-ai.github.io/posts/sky-t1/)**: An o1-like model fine-tuned by NovaSky AI with very small cost.
|
| 925 |
+
1. **[WeClone](https://github.com/xming521/WeClone)**: One-stop solution for creating your digital avatar from chat logs.
|
| 926 |
+
1. **[EmoLLM](https://github.com/SmartFlowAI/EmoLLM)**: A project about large language models (LLMs) and mental health.
|
| 927 |
+
</details>
|
| 928 |
+
|
| 929 |
+
## License
|
| 930 |
+
|
| 931 |
+
This repository is licensed under the [Apache-2.0 License](LICENSE).
|
| 932 |
+
|
| 933 |
+
Please follow the model licenses to use the corresponding model weights: [BLOOM](https://huggingface.co/spaces/bigscience/license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [GLM-4](https://huggingface.co/THUDM/glm-4-9b/blob/main/LICENSE) / [GPT-2](https://github.com/openai/gpt-2/blob/master/LICENSE) / [Granite](LICENSE) / [InternLM](https://github.com/InternLM/InternLM#license) / [Llama](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [Llama 2](https://ai.meta.com/llama/license/) / [Llama 3](https://llama.meta.com/llama3/license/) / [Llama 4](https://github.com/meta-llama/llama-models/blob/main/models/llama4/LICENSE) / [MiniCPM](https://github.com/OpenBMB/MiniCPM/blob/main/MiniCPM%20Model%20License.md) / [Mistral/Mixtral/Pixtral](LICENSE) / [Phi-3/Phi-4](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [StarCoder 2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [TeleChat2](https://huggingface.co/Tele-AI/telechat-7B/blob/main/TeleChat%E6%A8%A1%E5%9E%8B%E7%A4%BE%E5%8C%BA%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf) / [Yuan 2](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan)
|
| 934 |
+
|
| 935 |
+
## Citation
|
| 936 |
+
|
| 937 |
+
If this work is helpful, please kindly cite as:
|
| 938 |
+
|
| 939 |
+
```bibtex
|
| 940 |
+
@inproceedings{zheng2024llamafactory,
|
| 941 |
+
title={LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models},
|
| 942 |
+
author={Yaowei Zheng and Richong Zhang and Junhao Zhang and Yanhan Ye and Zheyan Luo and Zhangchi Feng and Yongqiang Ma},
|
| 943 |
+
booktitle={Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)},
|
| 944 |
+
address={Bangkok, Thailand},
|
| 945 |
+
publisher={Association for Computational Linguistics},
|
| 946 |
+
year={2024},
|
| 947 |
+
url={http://arxiv.org/abs/2403.13372}
|
| 948 |
+
}
|
| 949 |
+
```
|
| 950 |
+
|
| 951 |
+
## Acknowledgement
|
| 952 |
+
|
| 953 |
+
This repo benefits from [PEFT](https://github.com/huggingface/peft), [TRL](https://github.com/huggingface/trl), [QLoRA](https://github.com/artidoro/qlora) and [FastChat](https://github.com/lm-sys/FastChat). Thanks for their wonderful works.
|
| 954 |
+
|
| 955 |
+
## Star History
|
| 956 |
+
|
| 957 |
+

|
LlamaFactory/README_zh.md
ADDED
|
@@ -0,0 +1,960 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+

|
| 2 |
+
|
| 3 |
+
[](https://github.com/hiyouga/LLaMA-Factory/stargazers)
|
| 4 |
+
[](https://github.com/hiyouga/LLaMA-Factory/commits/main)
|
| 5 |
+
[](https://github.com/hiyouga/LLaMA-Factory/graphs/contributors)
|
| 6 |
+
[](https://github.com/hiyouga/LLaMA-Factory/actions/workflows/tests.yml)
|
| 7 |
+
[](https://pypi.org/project/llamafactory/)
|
| 8 |
+
[](https://scholar.google.com/scholar?cites=12620864006390196564)
|
| 9 |
+
[](https://hub.docker.com/r/hiyouga/llamafactory/tags)
|
| 10 |
+
|
| 11 |
+
[](https://twitter.com/llamafactory_ai)
|
| 12 |
+
[](https://discord.gg/rKfvV9r9FK)
|
| 13 |
+
[](https://github.com/hiyouga/llamafactory-community)
|
| 14 |
+
[](https://blog.llamafactory.net/)
|
| 15 |
+
|
| 16 |
+
[](https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing)
|
| 17 |
+
[](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory)
|
| 18 |
+
[](https://www.lab4ai.cn/course/detail?id=7c13e60f6137474eb40f6fd3983c0f46&utm_source=LLaMA-Factory)
|
| 19 |
+
[](https://www.llamafactory.com.cn/?utm_source=LLaMA-Factory)
|
| 20 |
+
[](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
|
| 21 |
+
[](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
|
| 22 |
+
[](https://novita.ai/templates-library/105981?sharer=88115474-394e-4bda-968e-b88e123d0c47)
|
| 23 |
+
|
| 24 |
+
### 获得[亚马逊](https://aws.amazon.com/cn/blogs/china/a-one-stop-code-free-model-fine-tuning-deployment-platform-based-on-sagemaker-and-llama-factory/)、[英伟达](https://developer.nvidia.cn/rtx/ai-toolkit)、[阿里云](https://help.aliyun.com/zh/pai/use-cases/fine-tune-a-llama-3-model-with-llama-factory)等的应用。
|
| 25 |
+
|
| 26 |
+
<div align="center" markdown="1">
|
| 27 |
+
|
| 28 |
+
### 赞助商 ❤️
|
| 29 |
+
|
| 30 |
+
| <div style="text-align: center;"><a href="https://warp.dev/llama-factory"><img alt="Warp sponsorship" width="400" src="assets/sponsors/warp.jpg"></a><br><a href="https://warp.dev/llama-factory" style="font-size:larger;">Warp,面向开发者的智能终端</a><br><a href="https://warp.dev/llama-factory">适用于 MacOS、Linux 和 Windows</a> | <a href="https://serpapi.com"><img alt="SerpAPI sponsorship" width="250" src="assets/sponsors/serpapi.svg"> </a> |
|
| 31 |
+
| ---- | ---- |
|
| 32 |
+
|
| 33 |
+
----
|
| 34 |
+
|
| 35 |
+
### 使用零代码[命令行](#快速开始)与 [Web UI](#llama-board-可视化微调由-gradio-驱动) 轻松微调百余种大模型
|
| 36 |
+
|
| 37 |
+

|
| 38 |
+
|
| 39 |
+
</div>
|
| 40 |
+
|
| 41 |
+
👋 加入我们的[微信群](https://github.com/hiyouga/llamafactory-community/blob/main/wechat/main.jpg)、[NPU 用户群](https://github.com/hiyouga/llamafactory-community/blob/main/wechat/npu.jpg)、[大模型实验室群](https://github.com/hiyouga/llamafactory-community/blob/main/wechat/lab4ai.jpg) 或 [LLaMA Factory Online 用户群](https://github.com/hiyouga/llamafactory-community/blob/main/wechat/online.png)。
|
| 42 |
+
|
| 43 |
+
\[ [English](README.md) | 中文 \]
|
| 44 |
+
|
| 45 |
+
**微调大模型可以像这样轻松…**
|
| 46 |
+
|
| 47 |
+
https://github.com/user-attachments/assets/43b700c6-a178-41db-b1f8-8190a5d3fcfc
|
| 48 |
+
|
| 49 |
+
开始本地训练:
|
| 50 |
+
- 请见[如何使用](#如何使用)
|
| 51 |
+
|
| 52 |
+
开始云端训练:
|
| 53 |
+
- **Colab(免费)**:https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing
|
| 54 |
+
- **PAI-DSW(免费试用)**:https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory
|
| 55 |
+
- **LLaMA Factory Online(在线微调)**:https://www.llamafactory.com.cn/?utm_source=LLaMA-Factory
|
| 56 |
+
- **九章智算云(算力优惠活动)**:https://docs.alayanew.com/docs/documents/useGuide/LLaMAFactory/mutiple/?utm_source=LLaMA-Factory
|
| 57 |
+
|
| 58 |
+
阅读技术文档:
|
| 59 |
+
- **入门教程**:https://zhuanlan.zhihu.com/p/695287607
|
| 60 |
+
- **微调视频教程**:https://www.bilibili.com/video/BV1djgRzxEts/
|
| 61 |
+
- **框架文档**:https://llamafactory.readthedocs.io/zh-cn/latest/
|
| 62 |
+
- **框架文档(昇腾 NPU)**:https://ascend.github.io/docs/sources/llamafactory/
|
| 63 |
+
- **官方博客**:https://blog.llamafactory.net/
|
| 64 |
+
- **官方课程**:https://www.lab4ai.cn/course/detail?id=7c13e60f6137474eb40f6fd3983c0f46&utm_source=LLaMA-Factory
|
| 65 |
+
|
| 66 |
+
> [!NOTE]
|
| 67 |
+
> 除上述链接以外的其他网站均为未经许可的第三方网站,请小心甄别。
|
| 68 |
+
|
| 69 |
+
## 目录
|
| 70 |
+
|
| 71 |
+
- [项目特色](#项目特色)
|
| 72 |
+
- [官方博客](#官方博客)
|
| 73 |
+
- [更新日志](#更新日志)
|
| 74 |
+
- [模型](#模型)
|
| 75 |
+
- [训练方法](#训练方法)
|
| 76 |
+
- [数据集](#数据集)
|
| 77 |
+
- [软硬件依赖](#软硬件依赖)
|
| 78 |
+
- [如何使用](#如何使用)
|
| 79 |
+
- [安装 LLaMA Factory](#安装-llama-factory)
|
| 80 |
+
- [数据准备](#数据准备)
|
| 81 |
+
- [快速开始](#快速开始)
|
| 82 |
+
- [LLaMA Board 可视化微调](#llama-board-可视化微调由-gradio-驱动)
|
| 83 |
+
- [LLaMA Factory Online 在线微调](#llama-factory-online-在线微调)
|
| 84 |
+
- [构建 Docker](#构建-docker)
|
| 85 |
+
- [利用 vLLM 部署 OpenAI API](#利用-vllm-部署-openai-api)
|
| 86 |
+
- [从魔搭社区下载](#从魔搭社区下载)
|
| 87 |
+
- [从魔乐社区下载](#从魔乐社区下载)
|
| 88 |
+
- [使用 W&B 面板](#使用-wb-面板)
|
| 89 |
+
- [使用 SwanLab 面板](#使用-swanlab-面板)
|
| 90 |
+
- [使用了 LLaMA Factory 的项目](#使用了-llama-factory-的项目)
|
| 91 |
+
- [协议](#协议)
|
| 92 |
+
- [引用](#引用)
|
| 93 |
+
- [致谢](#致谢)
|
| 94 |
+
|
| 95 |
+
## 项目特色
|
| 96 |
+
|
| 97 |
+
- **多种模型**:LLaMA、LLaVA、Mistral、Mixtral-MoE、Qwen3、Qwen3-VL、DeepSeek、Gemma、GLM、Phi 等等。
|
| 98 |
+
- **集成方法**:(增量)预训练、(多模态)指令监督微调、奖励模型训练、PPO 训练、DPO 训练、KTO 训练、ORPO 训练等等。
|
| 99 |
+
- **多种精度**:16 比特全参数微调、冻结微调、LoRA 微调和基于 AQLM/AWQ/GPTQ/LLM.int8/HQQ/EETQ 的 2/3/4/5/6/8 比特 QLoRA 微调。
|
| 100 |
+
- **先进算法**:[GaLore](https://github.com/jiaweizzhao/GaLore)、[BAdam](https://github.com/Ledzy/BAdam)、[APOLLO](https://github.com/zhuhanqing/APOLLO)、[Adam-mini](https://github.com/zyushun/Adam-mini)、[Muon](https://github.com/KellerJordan/Muon)、[OFT](https://github.com/huggingface/peft/tree/main/src/peft/tuners/oft)、DoRA、LongLoRA、LLaMA Pro、Mixture-of-Depths、LoRA+、LoftQ 和 PiSSA。
|
| 101 |
+
- **实用技巧**:[FlashAttention-2](https://github.com/Dao-AILab/flash-attention)、[Unsloth](https://github.com/unslothai/unsloth)、[Liger Kernel](https://github.com/linkedin/Liger-Kernel)、[KTransformers](https://github.com/kvcache-ai/ktransformers/)、RoPE scaling、NEFTune 和 rsLoRA。
|
| 102 |
+
- **广泛任务**:多轮对话、工具调用、图像理解、视觉定位、视频识别和语音理解等等。
|
| 103 |
+
- **实验监控**:LlamaBoard、TensorBoard、Wandb、MLflow、[SwanLab](https://github.com/SwanHubX/SwanLab) 等等。
|
| 104 |
+
- **极速推理**:基于 [vLLM](https://github.com/vllm-project/vllm) 或 [SGLang](https://github.com/sgl-project/sglang) 的 OpenAI 风格 API、浏览器界面和命令行接口。
|
| 105 |
+
|
| 106 |
+
### 最新模型的 Day-N 微调适配
|
| 107 |
+
|
| 108 |
+
| 适配时间 | 模型名称 |
|
| 109 |
+
| ------------ | -------------------------------------------------------------------- |
|
| 110 |
+
| Day 0 | Qwen3 / Qwen2.5-VL / Gemma 3 / GLM-4.1V / InternLM 3 / MiniCPM-o-2.6 |
|
| 111 |
+
| Day 1 | Llama 3 / GLM-4 / Mistral Small / PaliGemma2 / Llama 4 |
|
| 112 |
+
|
| 113 |
+
## 官方博客
|
| 114 |
+
|
| 115 |
+
> [!TIP]
|
| 116 |
+
> 我们现在拥有了 LLaMA Factory 的专属博客!
|
| 117 |
+
>
|
| 118 |
+
> 网站地址:https://blog.llamafactory.net/
|
| 119 |
+
|
| 120 |
+
- 💡 [KTransformers Fine-Tuning × LLaMA Factory: 用 2 张 4090 级的 GPU + CPU 微调 1000B 规模的超大模型](https://swcil84qspu.feishu.cn/wiki/Z1sSwb2poijybxkyPEkcDG6enVc)(中文)
|
| 121 |
+
- 💡 [Easy Dataset × LLaMA Factory: 让大模型高效学习领域知识](https://buaa-act.feishu.cn/wiki/KY9xwTGs1iqHrRkjXBwcZP9WnL9)(中文)
|
| 122 |
+
- [使用 LLaMA-Factory 微调心理健康大模型](https://www.lab4ai.cn/project/detail?id=25cce32ec131497b9e06a93336a0817f&type=project&utm_source=LLaMA-Factory)(中文)
|
| 123 |
+
- [使用 LLaMA-Factory 构建 GPT-OSS 角色扮演模型](https://docs.llamafactory.com.cn/docs/documents/best-practice/gptroleplay/?utm_source=LLaMA-Factory)(中文)
|
| 124 |
+
- [基于 LLaMA-Factory 和 EasyR1 打造一站式无代码大模型强化学习和部署平台 LLM Model Hub](https://aws.amazon.com/cn/blogs/china/building-llm-model-hub-based-on-llamafactory-and-easyr1/)(中文)
|
| 125 |
+
- [通过亚马逊 SageMaker HyperPod 上的 LLaMA-Factory 增强多模态模型银行文档的视觉信息提取](https://aws.amazon.com/cn/blogs/machine-learning/how-apoidea-group-enhances-visual-information-extraction-from-banking-documents-with-multimodal-models-using-llama-factory-on-amazon-sagemaker-hyperpod/)(英文)
|
| 126 |
+
|
| 127 |
+
<details><summary>全部博客</summary>
|
| 128 |
+
|
| 129 |
+
- [使用 LLaMA-Factory 微调 Llama3.1-70B 医学诊断模型](https://docs.alayanew.com/docs/documents/bestPractice/bigModel/llama70B/?utm_source=LLaMA-Factory)(中文)
|
| 130 |
+
- [使用 LLaMA-Factory 微调 Qwen2.5-VL 实现自动驾驶场景微调](https://docs.alayanew.com/docs/documents/useGuide/LLaMAFactory/mutiple/?utm_source=LLaMA-Factory)(中文)
|
| 131 |
+
- [LLaMA Factory:微调 DeepSeek-R1-Distill-Qwen-7B 模型实现新闻标题分类器](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_deepseek_r1_distill_7b)(中文)
|
| 132 |
+
- [基于 Amazon SageMaker 和 LLaMA-Factory 打造一站式无代码模型微调部署平台 Model Hub](https://aws.amazon.com/cn/blogs/china/a-one-stop-code-free-model-fine-tuning-deployment-platform-based-on-sagemaker-and-llama-factory/)(中文)
|
| 133 |
+
- [LLaMA Factory 多模态微调实践:微调 Qwen2-VL 构建文旅大模型](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_qwen2vl)(中文)
|
| 134 |
+
- [LLaMA Factory:微调 Llama3 模型实现角色扮演](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory)(中文)
|
| 135 |
+
|
| 136 |
+
</details>
|
| 137 |
+
|
| 138 |
+
## 更新日志
|
| 139 |
+
|
| 140 |
+
[25/10/26] 我们支持了 **Megatron-Core** 训练后端,并适配了 [**mcore_adapter**](https://github.com/alibaba/ROLL/tree/main/mcore_adapter)。查看 [PR #9237](https://github.com/hiyouga/LLaMA-Factory/pull/9237) 以使用。
|
| 141 |
+
|
| 142 |
+
[25/08/22] 我们支持了 **[OFT](https://arxiv.org/abs/2306.07280)** 和 **[OFTv2](https://arxiv.org/abs/2506.19847)** 模型的微调。查看 [examples](examples/README.md) 以使用。
|
| 143 |
+
|
| 144 |
+
[25/08/20] 我们支持了 **[Intern-S1-mini](https://huggingface.co/internlm/Intern-S1-mini)** 模型的微调。查看 [PR #8976](https://github.com/hiyouga/LLaMA-Factory/pull/8976) 以使用。
|
| 145 |
+
|
| 146 |
+
[25/08/06] 我们支持了 **[GPT-OSS](https://github.com/openai/gpt-oss)** 模型的微调。查看 [PR #8826](https://github.com/hiyouga/LLaMA-Factory/pull/8826) 以使用。
|
| 147 |
+
|
| 148 |
+
<details><summary>展开日志</summary>
|
| 149 |
+
|
| 150 |
+
[25/07/02] 我们支持了 **[GLM-4.1V-9B-Thinking](https://github.com/THUDM/GLM-4.1V-Thinking)** 模型的微调。
|
| 151 |
+
|
| 152 |
+
[25/04/28] 我们支持了 **[Qwen3](https://qwenlm.github.io/blog/qwen3/)** 系列模型的微调。
|
| 153 |
+
|
| 154 |
+
[25/04/21] 我们支持了 **[Muon](https://github.com/KellerJordan/Muon)** 优化器。详细用法请参照 [examples](examples/README_zh.md)。感谢 [@tianshijing](https://github.com/tianshijing) 的 PR。
|
| 155 |
+
|
| 156 |
+
[25/04/16] 我们支持了 **[InternVL3](https://huggingface.co/OpenGVLab/InternVL3-8B)** 模型的微调。查看 [PR #7258](https://github.com/hiyouga/LLaMA-Factory/pull/7258) 以使用。
|
| 157 |
+
|
| 158 |
+
[25/04/14] 我们支持了 **[GLM-Z1](https://huggingface.co/THUDM/GLM-Z1-9B-0414)** 和 **[Kimi-VL](https://huggingface.co/moonshotai/Kimi-VL-A3B-Instruct)** 模型的微调。
|
| 159 |
+
|
| 160 |
+
[25/04/06] 我们支持了 **[Llama 4](https://ai.meta.com/blog/llama-4-multimodal-intelligence/)** 模型的微调。查看 [PR #7611](https://github.com/hiyouga/LLaMA-Factory/pull/7611) 以使用。
|
| 161 |
+
|
| 162 |
+
[25/03/31] 我们支持了 **[Qwen2.5 Omni](https://qwenlm.github.io/blog/qwen2.5-omni/)** 模型的微调。查看 [PR #7537](https://github.com/hiyouga/LLaMA-Factory/pull/7537) 以使用。
|
| 163 |
+
|
| 164 |
+
[25/03/15] 我们支持了 **[SGLang](https://github.com/sgl-project/sglang)** 推理后端,请使用 `infer_backend: sglang` 启用。
|
| 165 |
+
|
| 166 |
+
[25/03/12] 我们支持了 **[Gemma 3](https://huggingface.co/blog/gemma3)** 模型的微调。
|
| 167 |
+
|
| 168 |
+
[25/02/24] 我们宣布开源 **[EasyR1](https://github.com/hiyouga/EasyR1)**,一个高效可扩展的多模态强化学习框架,支持高效的 GRPO 训练。
|
| 169 |
+
|
| 170 |
+
[25/02/11] 我们支持了在导出模型时保存 **[Ollama](https://github.com/ollama/ollama)** 配置文件。详细用法请参照 [examples](examples/README_zh.md)。
|
| 171 |
+
|
| 172 |
+
[25/02/05] 我们支持了在语音理解任务上微调 **[Qwen2-Audio](https://huggingface.co/Qwen/Qwen2-Audio-7B-Instruct)** 和 **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** 模型。
|
| 173 |
+
|
| 174 |
+
[25/01/31] 我们支持了 **[DeepSeek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1)** 和 **[Qwen2.5-VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct)** 模型的微调。
|
| 175 |
+
|
| 176 |
+
[25/01/15] 我们支持了 **[APOLLO](https://arxiv.org/abs/2412.05270)** 优化器。详细用法请参照 [examples](examples/README_zh.md)。
|
| 177 |
+
|
| 178 |
+
[25/01/14] 我们支持了 **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** 和 **[MiniCPM-V-2.6](https://huggingface.co/openbmb/MiniCPM-V-2_6)** 模型的微调。感谢 [@BUAADreamer](https://github.com/BUAADreamer) 的 PR。
|
| 179 |
+
|
| 180 |
+
[25/01/14] 我们支持了 **[InternLM 3](https://huggingface.co/collections/internlm/)** 模型的微调。感谢 [@hhaAndroid](https://github.com/hhaAndroid) 的 PR。
|
| 181 |
+
|
| 182 |
+
[25/01/10] 我们支持了 **[Phi-4](https://huggingface.co/microsoft/phi-4)** 模型的微调。
|
| 183 |
+
|
| 184 |
+
[24/12/21] 我们支持了使用 **[SwanLab](https://github.com/SwanHubX/SwanLab)** 跟踪与可视化实验。详细用法请参考 [此部分](#使用-swanlab-面板)。
|
| 185 |
+
|
| 186 |
+
[24/11/27] 我们支持了 **[Skywork-o1](https://huggingface.co/Skywork/Skywork-o1-Open-Llama-3.1-8B)** 模型的微调和 **[OpenO1](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT)** 数据集。
|
| 187 |
+
|
| 188 |
+
[24/10/09] 我们支持了从 **[魔乐社区](https://modelers.cn/models)** 下载预训练模型和数据集。详细用法请参照 [此教程](#从魔乐社区下载)。
|
| 189 |
+
|
| 190 |
+
[24/09/19] 我们支持了 **[Qwen2.5](https://qwenlm.github.io/blog/qwen2.5/)** 模型的微调。
|
| 191 |
+
|
| 192 |
+
[24/08/30] 我们支持了 **[Qwen2-VL](https://qwenlm.github.io/blog/qwen2-vl/)** 模型的微调。感谢 [@simonJJJ](https://github.com/simonJJJ) 的 PR。
|
| 193 |
+
|
| 194 |
+
[24/08/27] 我们支持了 **[Liger Kernel](https://github.com/linkedin/Liger-Kernel)**。请使用 `enable_liger_kernel: true` 来加速训练。
|
| 195 |
+
|
| 196 |
+
[24/08/09] 我们支持了 **[Adam-mini](https://github.com/zyushun/Adam-mini)** 优化器。详细用法请参照 [examples](examples/README_zh.md)。感谢 [@relic-yuexi](https://github.com/relic-yuexi) 的 PR。
|
| 197 |
+
|
| 198 |
+
[24/07/04] 我们支持了[无污染打包训练](https://github.com/MeetKai/functionary/tree/main/functionary/train/packing)。请使用 `neat_packing: true` 参数。感谢 [@chuan298](https://github.com/chuan298) 的 PR。
|
| 199 |
+
|
| 200 |
+
[24/06/16] 我们支持了 **[PiSSA](https://arxiv.org/abs/2404.02948)** 算法。详细用法请参照 [examples](examples/README_zh.md)。
|
| 201 |
+
|
| 202 |
+
[24/06/07] 我们支持了 **[Qwen2](https://qwenlm.github.io/blog/qwen2/)** 和 **[GLM-4](https://github.com/THUDM/GLM-4)** 模型的微调。
|
| 203 |
+
|
| 204 |
+
[24/05/26] 我们支持了 **[SimPO](https://arxiv.org/abs/2405.14734)** 偏好对齐算法。详细用法请参照 [examples](examples/README_zh.md)。
|
| 205 |
+
|
| 206 |
+
[24/05/20] 我们支持了 **PaliGemma** 系列模型的微调。注意 PaliGemma 是预训练模型,你需要使用 `paligemma` 模板进行微调使其获得对话能力。
|
| 207 |
+
|
| 208 |
+
[24/05/18] 我们支持了 **[KTO](https://arxiv.org/abs/2402.01306)** 偏好对齐算法。详细用法请参照 [examples](examples/README_zh.md)。
|
| 209 |
+
|
| 210 |
+
[24/05/14] 我们支持了昇腾 NPU 设备的训练和推理。详情请查阅[安装](#安装-llama-factory)部分。
|
| 211 |
+
|
| 212 |
+
[24/04/26] 我们支持了多模态模型 **LLaVA-1.5** 的微调。详细用法请参照 [examples](examples/README_zh.md)。
|
| 213 |
+
|
| 214 |
+
[24/04/22] 我们提供了在免费 T4 GPU 上微调 Llama-3 模型的 **[Colab 笔记本](https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing)**。Hugging Face 社区公开了两个利用 LLaMA Factory 微调的 Llama-3 模型,详情请见 [Llama3-8B-Chinese-Chat](https://huggingface.co/shenzhi-wang/Llama3-8B-Chinese-Chat) 和 [Llama3-Chinese](https://huggingface.co/zhichen/Llama3-Chinese)。
|
| 215 |
+
|
| 216 |
+
[24/04/21] 我们基于 [AstraMindAI 的仓库](https://github.com/astramind-ai/Mixture-of-depths)支持了 **[混合深度训练](https://arxiv.org/abs/2404.02258)**。详细用法请参照 [examples](examples/README_zh.md)。
|
| 217 |
+
|
| 218 |
+
[24/04/16] 我们支持了 **[BAdam](https://arxiv.org/abs/2404.02827)** 优化器。详细用法请参照 [examples](examples/README_zh.md)。
|
| 219 |
+
|
| 220 |
+
[24/04/16] 我们支持了 **[unsloth](https://github.com/unslothai/unsloth)** 的长序列训练(24GB 可训练 Llama-2-7B-56k)。该方法相比 FlashAttention-2 提供了 **117%** 的训练速度和 **50%** 的显存节约。更多数据请见[此页面](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison)。
|
| 221 |
+
|
| 222 |
+
[24/03/31] 我们支持了 **[ORPO](https://arxiv.org/abs/2403.07691)**。详细用法请参照 [examples](examples/README_zh.md)。
|
| 223 |
+
|
| 224 |
+
[24/03/21] 我们的论文 "[LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models](https://arxiv.org/abs/2403.13372)" 可在 arXiv 上查看!
|
| 225 |
+
|
| 226 |
+
[24/03/20] 我们支持了能在 2x24GB GPU 上微调 70B 模型的 **FSDP+QLoRA**。详细用法请参照 [examples](examples/README_zh.md)。
|
| 227 |
+
|
| 228 |
+
[24/03/13] 我们支持了 **[LoRA+](https://arxiv.org/abs/2402.12354)**。详细用法请参照 [examples](examples/README_zh.md)。
|
| 229 |
+
|
| 230 |
+
[24/03/07] 我们支持了 **[GaLore](https://arxiv.org/abs/2403.03507)** 优化器。详细用法请参照 [examples](examples/README_zh.md)。
|
| 231 |
+
|
| 232 |
+
[24/03/07] 我们集成了 **[vLLM](https://github.com/vllm-project/vllm)** 以实现极速并发推理。请使用 `infer_backend: vllm` 来获得 **270%** 的推理速度。
|
| 233 |
+
|
| 234 |
+
[24/02/28] 我们支持了 **[DoRA](https://arxiv.org/abs/2402.09353)** 微调。请使用 `use_dora: true` 参数进行 DoRA 微调。
|
| 235 |
+
|
| 236 |
+
[24/02/15] 我们支持了 [LLaMA Pro](https://github.com/TencentARC/LLaMA-Pro) 提出的**块扩展**方法。详细用法请参照 [examples](examples/README_zh.md)。
|
| 237 |
+
|
| 238 |
+
[24/02/05] Qwen1.5(Qwen2 测试版)系列模型已在 LLaMA-Factory 中实现微调支持。详情请查阅该[博客页面](https://qwenlm.github.io/zh/blog/qwen1.5/)。
|
| 239 |
+
|
| 240 |
+
[24/01/18] 我们针对绝大多数模型实现了 **Agent 微调**,微调时指定 `dataset: glaive_toolcall_zh` 即可使模型获得工具调用能力。
|
| 241 |
+
|
| 242 |
+
[23/12/23] 我们针对 LLaMA, Mistral 和 Yi 模型支持了 **[unsloth](https://github.com/unslothai/unsloth)** 的 LoRA 训练加速。请使用 `use_unsloth: true` 参数启用 unsloth 优化。该方法可提供 **170%** 的训练速度,详情请查阅[此页面](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison)。
|
| 243 |
+
|
| 244 |
+
[23/12/12] 我们支持了微调最新的混合专家模型 **[Mixtral 8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)**。硬件需求请查阅[此处](#硬件依赖)。
|
| 245 |
+
|
| 246 |
+
[23/12/01] 我们支持了从 **[魔搭社区](https://modelscope.cn/models)** 下载预训练模型和数据集。详细用法请参照 [此教程](#从魔搭社区下载)。
|
| 247 |
+
|
| 248 |
+
[23/10/21] 我们支持了 **[NEFTune](https://arxiv.org/abs/2310.05914)** 训练技巧。请使用 `neftune_noise_alpha: 5` 参数启用 NEFTune。
|
| 249 |
+
|
| 250 |
+
[23/09/27] 我们针对 LLaMA 模型支持了 [LongLoRA](https://github.com/dvlab-research/LongLoRA) 提出的 **$S^2$-Attn**。请使用 `shift_attn: true` 参数以启用该功能。
|
| 251 |
+
|
| 252 |
+
[23/09/23] 我们在项目中集成了 MMLU、C-Eval 和 CMMLU 评估集。详细用法请参照 [examples](examples/README_zh.md)。
|
| 253 |
+
|
| 254 |
+
[23/09/10] 我们支持了 **[FlashAttention-2](https://github.com/Dao-AILab/flash-attention)**。如果您使用的是 RTX4090、A100 或 H100 GPU,请使用 `flash_attn: fa2` 参数以启用 FlashAttention-2。
|
| 255 |
+
|
| 256 |
+
[23/08/12] 我们支持了 **RoPE 插值**来扩展 LLaMA 模型的上下文长度。请使用 `rope_scaling: linear` 参数训练模型或使用 `rope_scaling: dynamic` 参数评估模型。
|
| 257 |
+
|
| 258 |
+
[23/08/11] 我们支持了指令模型的 **[DPO 训练](https://arxiv.org/abs/2305.18290)**。详细用法请参照 [examples](examples/README_zh.md)。
|
| 259 |
+
|
| 260 |
+
[23/07/31] 我们支持了**数据流式加载**。请使用 `streaming: true` 和 `max_steps: 10000` 参数来流式加载数据集。
|
| 261 |
+
|
| 262 |
+
[23/07/29] 我们在 Hugging Face 发布了两个 13B 指令微调模型。详细内容请查阅我们的 Hugging Face 项目([LLaMA-2](https://huggingface.co/hiyouga/Llama-2-Chinese-13b-chat) / [Baichuan](https://huggingface.co/hiyouga/Baichuan-13B-sft))。
|
| 263 |
+
|
| 264 |
+
[23/07/18] 我们开发了支持训练和测试的**浏览器一体化界面**。请使用 `train_web.py` 在您的浏览器中微调模型。感谢 [@KanadeSiina](https://github.com/KanadeSiina) 和 [@codemayq](https://github.com/codemayq) 在该功能开发中付出的努力。
|
| 265 |
+
|
| 266 |
+
[23/07/09] 我们开源了 **[FastEdit](https://github.com/hiyouga/FastEdit)** ⚡🩹,一个简单易用的、能迅速编辑大模型事实记忆的工具包。如果您感兴趣请关注我们的 [FastEdit](https://github.com/hiyouga/FastEdit) 项目。
|
| 267 |
+
|
| 268 |
+
[23/06/29] 我们提供了一个**可复现的**指令模型微调示例,详细内容请查阅 [Baichuan-7B-sft](https://huggingface.co/hiyouga/Baichuan-7B-sft)。
|
| 269 |
+
|
| 270 |
+
[23/06/22] 我们对齐了[示例 API](src/api_demo.py) 与 [OpenAI API](https://platform.openai.com/docs/api-reference/chat) 的格式,您可以将微调模型接入**任意基于 ChatGPT 的应用**中。
|
| 271 |
+
|
| 272 |
+
[23/06/03] 我们实现了 4 比特的 LoRA 训练(也称 **[QLoRA](https://github.com/artidoro/qlora)**)。详细用法请参照 [examples](examples/README_zh.md)。
|
| 273 |
+
|
| 274 |
+
</details>
|
| 275 |
+
|
| 276 |
+
> [!TIP]
|
| 277 |
+
> 如果您无法使用最新的功能,请尝试重新拉取代码并再次安装 LLaMA-Factory。
|
| 278 |
+
|
| 279 |
+
## 模型
|
| 280 |
+
|
| 281 |
+
| 模型名 | 参数量 | Template |
|
| 282 |
+
| ----------------------------------------------------------------- | -------------------------------- | -------------------- |
|
| 283 |
+
| [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
|
| 284 |
+
| [DeepSeek (LLM/Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
|
| 285 |
+
| [DeepSeek 3-3.2](https://huggingface.co/deepseek-ai) | 236B/671B | deepseek3 |
|
| 286 |
+
| [DeepSeek R1 (Distill)](https://huggingface.co/deepseek-ai) | 1.5B/7B/8B/14B/32B/70B/671B | deepseekr1 |
|
| 287 |
+
| [ERNIE-4.5](https://huggingface.co/baidu) | 0.3B/21B/300B | ernie_nothink |
|
| 288 |
+
| [Falcon/Falcon H1](https://huggingface.co/tiiuae) | 0.5B/1.5B/3B/7B/11B/34B/40B/180B | falcon/falcon_h1 |
|
| 289 |
+
| [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma/gemma2 |
|
| 290 |
+
| [Gemma 3/Gemma 3n](https://huggingface.co/google) | 270M/1B/4B/6B/8B/12B/27B | gemma3/gemma3n |
|
| 291 |
+
| [GLM-4/GLM-4-0414/GLM-Z1](https://huggingface.co/zai-org) | 9B/32B | glm4/glmz1 |
|
| 292 |
+
| [GLM-4.5/GLM-4.5(6)V](https://huggingface.co/zai-org) | 9B/106B/355B | glm4_moe/glm4_5v |
|
| 293 |
+
| [GPT-2](https://huggingface.co/openai-community) | 0.1B/0.4B/0.8B/1.5B | - |
|
| 294 |
+
| [GPT-OSS](https://huggingface.co/openai) | 20B/120B | gpt_oss |
|
| 295 |
+
| [Granite 3-4](https://huggingface.co/ibm-granite) | 1B/2B/3B/7B/8B | granite3/granite4 |
|
| 296 |
+
| [Hunyuan/Hunyuan1.5 (MT)](https://huggingface.co/tencent/) | 0.5B/1.8B/4B/7B/13B | hunyuan/hunyuan_small |
|
| 297 |
+
| [InternLM 2-3](https://huggingface.co/internlm) | 7B/8B/20B | intern2 |
|
| 298 |
+
| [InternVL 2.5-3.5](https://huggingface.co/OpenGVLab) | 1B/2B/4B/8B/14B/30B/38B/78B/241B | intern_vl |
|
| 299 |
+
| [Intern-S1-mini](https://huggingface.co/internlm/) | 8B | intern_s1 |
|
| 300 |
+
| [Kimi-VL](https://huggingface.co/moonshotai) | 16B | kimi_vl |
|
| 301 |
+
| [Ling 2.0 (mini/flash)](https://huggingface.co/inclusionAI) | 16B/100B | bailing_v2 |
|
| 302 |
+
| [LFM 2.5 (VL)](https://huggingface.co/LiquidAI) | 1.2B/1.6B | lfm2/lfm2_vl |
|
| 303 |
+
| [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
|
| 304 |
+
| [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
|
| 305 |
+
| [Llama 3-3.3](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |
|
| 306 |
+
| [Llama 4](https://huggingface.co/meta-llama) | 109B/402B | llama4 |
|
| 307 |
+
| [Llama 3.2 Vision](https://huggingface.co/meta-llama) | 11B/90B | mllama |
|
| 308 |
+
| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava |
|
| 309 |
+
| [LLaVA-NeXT](https://huggingface.co/llava-hf) | 7B/8B/13B/34B/72B/110B | llava_next |
|
| 310 |
+
| [LLaVA-NeXT-Video](https://huggingface.co/llava-hf) | 7B/34B | llava_next_video |
|
| 311 |
+
| [MiMo](https://huggingface.co/XiaomiMiMo) | 7B/309B | mimo/mimo_v2 |
|
| 312 |
+
| [MiniCPM 4](https://huggingface.co/openbmb) | 0.5B/8B | cpm4 |
|
| 313 |
+
| [MiniCPM-o-2.6/MiniCPM-V-2.6](https://huggingface.co/openbmb) | 8B | minicpm_o/minicpm_v |
|
| 314 |
+
| [MiniMax-M1/MiniMax-M2](https://huggingface.co/MiniMaxAI/models) | 229B/456B | minimax1/minimax2 |
|
| 315 |
+
| [Ministral 3](https://huggingface.co/mistralai) | 3B/8B/14B | ministral3 |
|
| 316 |
+
| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
|
| 317 |
+
| [PaliGemma/PaliGemma2](https://huggingface.co/google) | 3B/10B/28B | paligemma |
|
| 318 |
+
| [Phi-3/Phi-3.5](https://huggingface.co/microsoft) | 4B/14B | phi |
|
| 319 |
+
| [Phi-3-small](https://huggingface.co/microsoft) | 7B | phi_small |
|
| 320 |
+
| [Phi-4-mini/Phi-4](https://huggingface.co/microsoft) | 3.8B/14B | phi4_mini/phi4 |
|
| 321 |
+
| [Pixtral](https://huggingface.co/mistralai) | 12B | pixtral |
|
| 322 |
+
| [Qwen2 (Code/Math/MoE/QwQ)](https://huggingface.co/Qwen) | 0.5B/1.5B/3B/7B/14B/32B/72B/110B | qwen |
|
| 323 |
+
| [Qwen3 (MoE/Instruct/Thinking/Next)](https://huggingface.co/Qwen) | 0.6B/1.7B/4B/8B/14B/32B/80B/235B | qwen3/qwen3_nothink |
|
| 324 |
+
| [Qwen2-Audio](https://huggingface.co/Qwen) | 7B | qwen2_audio |
|
| 325 |
+
| [Qwen2.5-Omni](https://huggingface.co/Qwen) | 3B/7B | qwen2_omni |
|
| 326 |
+
| [Qwen3-Omni](https://huggingface.co/Qwen) | 30B | qwen3_omni |
|
| 327 |
+
| [Qwen2-VL/Qwen2.5-VL/QVQ](https://huggingface.co/Qwen) | 2B/3B/7B/32B/72B | qwen2_vl |
|
| 328 |
+
| [Qwen3-VL](https://huggingface.co/Qwen) | 2B/4B/8B/30B/32B/235B | qwen3_vl |
|
| 329 |
+
| [Seed (OSS/Coder)](https://huggingface.co/ByteDance-Seed) | 8B/36B | seed_oss/seed_coder |
|
| 330 |
+
| [StarCoder 2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
|
| 331 |
+
| [TeleChat 2-2.5](https://huggingface.co/Tele-AI) | 3B/7B/35B/115B | telechat2 |
|
| 332 |
+
| [Yuan 2](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan |
|
| 333 |
+
|
| 334 |
+
> [!NOTE]
|
| 335 |
+
> 对于所有“基座”(Base)模型,`template` 参数可以是 `default`, `alpaca`, `vicuna` 等任意值。但“对话”(Instruct/Chat)模型请务必使用**对应的模板**。
|
| 336 |
+
>
|
| 337 |
+
> 如果模型有推理 / 非推理两个版本,请使用 `_nothink` 后缀来区分不同的模板。例如 `qwen3` 和 `qwen3_nothink`。
|
| 338 |
+
>
|
| 339 |
+
> 请务必在训练和推理时采用**完全一致**的模板。
|
| 340 |
+
>
|
| 341 |
+
> \*:您需要从 main 分支安装 `transformers` 并使用 `DISABLE_VERSION_CHECK=1` 来跳过版本检查。
|
| 342 |
+
>
|
| 343 |
+
> \*\*:您需要安装特定版本的 `transformers` 以使用该模型。
|
| 344 |
+
|
| 345 |
+
项目所支持模型的完整列表请参阅 [constants.py](src/llamafactory/extras/constants.py)。
|
| 346 |
+
|
| 347 |
+
您也可以在 [template.py](src/llamafactory/data/template.py) 中添加自己的对话模板。
|
| 348 |
+
|
| 349 |
+
## 训练方法
|
| 350 |
+
|
| 351 |
+
| 方法 | 全参数训练 | 部分参数训练 | LoRA | QLoRA |
|
| 352 |
+
| --------------------- | ------------------ | ------------------ | ------------------ | ------------------ |
|
| 353 |
+
| 预训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| 354 |
+
| 指令监督微调 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| 355 |
+
| 奖励模型训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| 356 |
+
| PPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| 357 |
+
| DPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| 358 |
+
| KTO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| 359 |
+
| ORPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| 360 |
+
| SimPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| 361 |
+
|
| 362 |
+
> [!TIP]
|
| 363 |
+
> 有关 PPO 的实现细节,请参考[此博客](https://newfacade.github.io/notes-on-reinforcement-learning/17-ppo-trl.html)。
|
| 364 |
+
|
| 365 |
+
## 数据集
|
| 366 |
+
|
| 367 |
+
<details><summary>预训练数据集</summary>
|
| 368 |
+
|
| 369 |
+
- [Wiki Demo (en)](data/wiki_demo.txt)
|
| 370 |
+
- [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)
|
| 371 |
+
- [RedPajama V2 (en)](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-V2)
|
| 372 |
+
- [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220)
|
| 373 |
+
- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
|
| 374 |
+
- [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile)
|
| 375 |
+
- [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)
|
| 376 |
+
- [FineWeb (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb)
|
| 377 |
+
- [FineWeb-Edu (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu)
|
| 378 |
+
- [CCI3-HQ (zh)](https://huggingface.co/datasets/BAAI/CCI3-HQ)
|
| 379 |
+
- [CCI3-Data (zh)](https://huggingface.co/datasets/BAAI/CCI3-Data)
|
| 380 |
+
- [CCI4.0-M2-Base-v1 (en&zh)](https://huggingface.co/datasets/BAAI/CCI4.0-M2-Base-v1)
|
| 381 |
+
- [CCI4.0-M2-CoT-v1 (en&zh)](https://huggingface.co/datasets/BAAI/CCI4.0-M2-CoT-v1)
|
| 382 |
+
- [CCI4.0-M2-Extra-v1 (en&zh)](https://huggingface.co/datasets/BAAI/CCI4.0-M2-Extra-v1)
|
| 383 |
+
- [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)
|
| 384 |
+
- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
|
| 385 |
+
|
| 386 |
+
</details>
|
| 387 |
+
|
| 388 |
+
<details><summary>指令微调数据集</summary>
|
| 389 |
+
|
| 390 |
+
- [Identity (en&zh)](data/identity.json)
|
| 391 |
+
- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
|
| 392 |
+
- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca-3)
|
| 393 |
+
- [Alpaca GPT4 (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
|
| 394 |
+
- [Glaive Function Calling V2 (en&zh)](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2)
|
| 395 |
+
- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
|
| 396 |
+
- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
|
| 397 |
+
- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
|
| 398 |
+
- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
|
| 399 |
+
- [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN)
|
| 400 |
+
- [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M)
|
| 401 |
+
- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
|
| 402 |
+
- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
|
| 403 |
+
- [UltraChat (en)](https://github.com/thunlp/UltraChat)
|
| 404 |
+
- [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus)
|
| 405 |
+
- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
|
| 406 |
+
- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
|
| 407 |
+
- [OpenOrca (en)](https://huggingface.co/datasets/Open-Orca/OpenOrca)
|
| 408 |
+
- [SlimOrca (en)](https://huggingface.co/datasets/Open-Orca/SlimOrca)
|
| 409 |
+
- [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct)
|
| 410 |
+
- [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M)
|
| 411 |
+
- [Wiki QA (en)](https://huggingface.co/datasets/wiki_qa)
|
| 412 |
+
- [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa)
|
| 413 |
+
- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
|
| 414 |
+
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
| 415 |
+
- [deepctrl (en&zh)](https://www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data)
|
| 416 |
+
- [Advertise Generating (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
|
| 417 |
+
- [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)
|
| 418 |
+
- [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)
|
| 419 |
+
- [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)
|
| 420 |
+
- [Infinity Instruct (zh)](https://huggingface.co/datasets/BAAI/Infinity-Instruct)
|
| 421 |
+
- [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)
|
| 422 |
+
- [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
|
| 423 |
+
- [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)
|
| 424 |
+
- [Cosmopedia (en)](https://huggingface.co/datasets/HuggingFaceTB/cosmopedia)
|
| 425 |
+
- [STEM (zh)](https://huggingface.co/datasets/hfl/stem_zh_instruction)
|
| 426 |
+
- [Ruozhiba (zh)](https://huggingface.co/datasets/hfl/ruozhiba_gpt4_turbo)
|
| 427 |
+
- [Neo-sft (zh)](https://huggingface.co/datasets/m-a-p/neo_sft_phase2)
|
| 428 |
+
- [Magpie-Pro-300K-Filtered (en)](https://huggingface.co/datasets/Magpie-Align/Magpie-Pro-300K-Filtered)
|
| 429 |
+
- [Magpie-ultra-v0.1 (en)](https://huggingface.co/datasets/argilla/magpie-ultra-v0.1)
|
| 430 |
+
- [WebInstructSub (en)](https://huggingface.co/datasets/TIGER-Lab/WebInstructSub)
|
| 431 |
+
- [OpenO1-SFT (en&zh)](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT)
|
| 432 |
+
- [Open-Thoughts (en)](https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k)
|
| 433 |
+
- [Open-R1-Math (en)](https://huggingface.co/datasets/open-r1/OpenR1-Math-220k)
|
| 434 |
+
- [Chinese-DeepSeek-R1-Distill (zh)](https://huggingface.co/datasets/Congliu/Chinese-DeepSeek-R1-Distill-data-110k-SFT)
|
| 435 |
+
- [LLaVA mixed (en&zh)](https://huggingface.co/datasets/BUAADreamer/llava-en-zh-300k)
|
| 436 |
+
- [Pokemon-gpt4o-captions (en&zh)](https://huggingface.co/datasets/jugg1024/pokemon-gpt4o-captions)
|
| 437 |
+
- [DLR-Web (en)](https://huggingface.co/datasets/Attention1115/DLR-Web)
|
| 438 |
+
- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de)
|
| 439 |
+
- [Dolly 15k (de)](https://huggingface.co/datasets/mayflowergmbh/dolly-15k_de)
|
| 440 |
+
- [Alpaca GPT4 (de)](https://huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de)
|
| 441 |
+
- [OpenSchnabeltier (de)](https://huggingface.co/datasets/mayflowergmbh/openschnabeltier_de)
|
| 442 |
+
- [Evol Instruct (de)](https://huggingface.co/datasets/mayflowergmbh/evol-instruct_de)
|
| 443 |
+
- [Dolphin (de)](https://huggingface.co/datasets/mayflowergmbh/dolphin_de)
|
| 444 |
+
- [Booksum (de)](https://huggingface.co/datasets/mayflowergmbh/booksum_de)
|
| 445 |
+
- [Airoboros (de)](https://huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de)
|
| 446 |
+
- [Ultrachat (de)](https://huggingface.co/datasets/mayflowergmbh/ultra-chat_de)
|
| 447 |
+
|
| 448 |
+
</details>
|
| 449 |
+
|
| 450 |
+
<details><summary>偏好数据集</summary>
|
| 451 |
+
|
| 452 |
+
- [DPO mixed (en&zh)](https://huggingface.co/datasets/hiyouga/DPO-En-Zh-20k)
|
| 453 |
+
- [UltraFeedback (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized)
|
| 454 |
+
- [COIG-P (zh)](https://huggingface.co/datasets/m-a-p/COIG-P)
|
| 455 |
+
- [RLHF-V (en)](https://huggingface.co/datasets/openbmb/RLHF-V-Dataset)
|
| 456 |
+
- [VLFeedback (en)](https://huggingface.co/datasets/Zhihui/VLFeedback)
|
| 457 |
+
- [RLAIF-V (en)](https://huggingface.co/datasets/openbmb/RLAIF-V-Dataset)
|
| 458 |
+
- [Orca DPO Pairs (en)](https://huggingface.co/datasets/Intel/orca_dpo_pairs)
|
| 459 |
+
- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
|
| 460 |
+
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
| 461 |
+
- [Orca DPO (de)](https://huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de)
|
| 462 |
+
- [KTO mixed (en)](https://huggingface.co/datasets/argilla/kto-mix-15k)
|
| 463 |
+
|
| 464 |
+
</details>
|
| 465 |
+
|
| 466 |
+
部分数据集的使用需要确认,我们推荐使用下述命令登录您的 Hugging Face 账户。
|
| 467 |
+
|
| 468 |
+
```bash
|
| 469 |
+
pip install --upgrade huggingface_hub
|
| 470 |
+
huggingface-cli login
|
| 471 |
+
```
|
| 472 |
+
|
| 473 |
+
## 软硬件依赖
|
| 474 |
+
|
| 475 |
+
| 必需项 | 至少 | 推荐 |
|
| 476 |
+
| ------------ | ------- | --------- |
|
| 477 |
+
| python | 3.9 | 3.10 |
|
| 478 |
+
| torch | 2.0.0 | 2.6.0 |
|
| 479 |
+
| torchvision | 0.15.0 | 0.21.0 |
|
| 480 |
+
| transformers | 4.49.0 | 4.50.0 |
|
| 481 |
+
| datasets | 2.16.0 | 3.2.0 |
|
| 482 |
+
| accelerate | 0.34.0 | 1.2.1 |
|
| 483 |
+
| peft | 0.14.0 | 0.15.1 |
|
| 484 |
+
| trl | 0.8.6 | 0.9.6 |
|
| 485 |
+
|
| 486 |
+
| 可选项 | 至少 | 推荐 |
|
| 487 |
+
| ------------ | ------- | --------- |
|
| 488 |
+
| CUDA | 11.6 | 12.2 |
|
| 489 |
+
| deepspeed | 0.10.0 | 0.16.4 |
|
| 490 |
+
| bitsandbytes | 0.39.0 | 0.43.1 |
|
| 491 |
+
| vllm | 0.4.3 | 0.8.2 |
|
| 492 |
+
| flash-attn | 2.5.6 | 2.7.2 |
|
| 493 |
+
|
| 494 |
+
### 硬件依赖
|
| 495 |
+
|
| 496 |
+
\* *估算值*
|
| 497 |
+
|
| 498 |
+
| 方法 | 精度 | 7B | 14B | 30B | 70B | `x`B |
|
| 499 |
+
| ------------------------------- | ---- | ----- | ----- | ----- | ------ | ------- |
|
| 500 |
+
| Full (`bf16` or `fp16`) | 32 | 120GB | 240GB | 600GB | 1200GB | `18x`GB |
|
| 501 |
+
| Full (`pure_bf16`) | 16 | 60GB | 120GB | 300GB | 600GB | `8x`GB |
|
| 502 |
+
| Freeze/LoRA/GaLore/APOLLO/BAdam | 16 | 16GB | 32GB | 64GB | 160GB | `2x`GB |
|
| 503 |
+
| QLoRA | 8 | 10GB | 20GB | 40GB | 80GB | `x`GB |
|
| 504 |
+
| QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | `x/2`GB |
|
| 505 |
+
| QLoRA | 2 | 4GB | 8GB | 16GB | 24GB | `x/4`GB |
|
| 506 |
+
|
| 507 |
+
## 如何使用
|
| 508 |
+
|
| 509 |
+
### 安装 LLaMA Factory
|
| 510 |
+
|
| 511 |
+
> [!IMPORTANT]
|
| 512 |
+
> 此步骤为必需。
|
| 513 |
+
|
| 514 |
+
#### 从源码安装
|
| 515 |
+
|
| 516 |
+
```bash
|
| 517 |
+
git clone --depth 1 https://github.com/hiyouga/LLaMA-Factory.git
|
| 518 |
+
cd LLaMA-Factory
|
| 519 |
+
pip install -e .
|
| 520 |
+
pip install -r requirements/metrics.txt
|
| 521 |
+
```
|
| 522 |
+
|
| 523 |
+
可选的额外依赖项:`metrics`、`deepspeed`。使用 `pip install -e . && pip install -r requirements/metrics.txt -r requirements/deepspeed.txt` 安装。
|
| 524 |
+
|
| 525 |
+
其他可选依赖项请参考 `examples/requirements/` 目录下的文件。
|
| 526 |
+
|
| 527 |
+
#### 从镜像安装
|
| 528 |
+
|
| 529 |
+
```bash
|
| 530 |
+
docker run -it --rm --gpus=all --ipc=host hiyouga/llamafactory:latest
|
| 531 |
+
```
|
| 532 |
+
|
| 533 |
+
该镜像基于 Ubuntu 22.04(x86\_64)、CUDA 12.4、Python 3.11、PyTorch 2.6.0 和 Flash-attn 2.7.4 构建。
|
| 534 |
+
|
| 535 |
+
查看全部镜像:https://hub.docker.com/r/hiyouga/llamafactory/tags
|
| 536 |
+
|
| 537 |
+
请参阅[构建 Docker](#构建-docker) 来重新构建镜像。
|
| 538 |
+
|
| 539 |
+
<details><summary>使用 <b>uv</b> 构建虚拟环境</summary>
|
| 540 |
+
|
| 541 |
+
使用 [uv](https://github.com/astral-sh/uv) 创建隔离的 Python 环境:
|
| 542 |
+
|
| 543 |
+
```bash
|
| 544 |
+
uv run llamafactory-cli webui
|
| 545 |
+
```
|
| 546 |
+
|
| 547 |
+
</details>
|
| 548 |
+
|
| 549 |
+
<details><summary>Windows 用户指南</summary>
|
| 550 |
+
|
| 551 |
+
#### 安装 PyTorch
|
| 552 |
+
|
| 553 |
+
Windows 平台需要额外手动安装 GPU 版本的 PyTorch 依赖包,您可以参考[官方网站](https://pytorch.org/get-started/locally/)和以下命令安装并测试 PyTorch 是否正确安装。
|
| 554 |
+
|
| 555 |
+
```bash
|
| 556 |
+
pip uninstall torch torchvision torchaudio
|
| 557 |
+
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu126
|
| 558 |
+
python -c "import torch; print(torch.cuda.is_available())"
|
| 559 |
+
```
|
| 560 |
+
|
| 561 |
+
如果看到 `True` 则说明安装成功。
|
| 562 |
+
|
| 563 |
+
若遇到类似 `Can't pickle local object` 的报错,请设置 `dataloader_num_workers: 0`。
|
| 564 |
+
|
| 565 |
+
#### 安装 BitsAndBytes
|
| 566 |
+
|
| 567 |
+
如果要在 Windows 平台上开启量化 LoRA(QLoRA),需要安装预编译的 `bitsandbytes` 库, 支持 CUDA 11.1 到 12.2, 请根据您的 CUDA 版本情况选择适合的[发布版本](https://github.com/jllllll/bitsandbytes-windows-webui/releases/tag/wheels)。
|
| 568 |
+
|
| 569 |
+
```bash
|
| 570 |
+
pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.2.post2-py3-none-win_amd64.whl
|
| 571 |
+
```
|
| 572 |
+
|
| 573 |
+
#### 安装 Flash Attention-2
|
| 574 |
+
|
| 575 |
+
如果要在 Windows 平台上开启 FlashAttention-2,请使用 [flash-attention-windows-wheel](https://huggingface.co/lldacing/flash-attention-windows-wheel) 中的脚本自行编译与安装。
|
| 576 |
+
|
| 577 |
+
</details>
|
| 578 |
+
|
| 579 |
+
<details><summary>昇腾 NPU 用户指南</summary>
|
| 580 |
+
|
| 581 |
+
在昇腾 NPU 设备上安装 LLaMA Factory 时,请将 Python 升级到 3.10 及以上,并使用 `pip install -r requirements/npu.txt` 命令安装额外依赖项。此外,还需要安装 **Ascend CANN Toolkit 与 Kernels**,安装方法请参考[安装教程](https://llamafactory.readthedocs.io/zh-cn/latest/advanced/npu_installation.html)。
|
| 582 |
+
|
| 583 |
+
您可以直接拉取预构建的最新 Docker 镜像:
|
| 584 |
+
|
| 585 |
+
```bash
|
| 586 |
+
# Docker Hub
|
| 587 |
+
docker pull hiyouga/llamafactory:latest-npu-a2
|
| 588 |
+
docker pull hiyouga/llamafactory:latest-npu-a3
|
| 589 |
+
|
| 590 |
+
# quay.io
|
| 591 |
+
docker pull quay.io/ascend/llamafactory:latest-npu-a2
|
| 592 |
+
docker pull quay.io/ascend/llamafactory:latest-npu-a3
|
| 593 |
+
```
|
| 594 |
+
|
| 595 |
+
#### 安装 BitsAndBytes
|
| 596 |
+
|
| 597 |
+
如果要在 Ascend NPU 上进行基于 bitsandbytes 的 QLoRA 量化微调,请执行如下步骤:
|
| 598 |
+
|
| 599 |
+
1. 手动编译 bitsandbytes:请参考[安装文档](https://huggingface.co/docs/bitsandbytes/installation?backend=Ascend+NPU&platform=Ascend+NPU)完成 NPU 版 bitsandbytes 的安装。编译环境要求 cmake 版本不低于 3.22.1,g++ 版本不低于 12.x。
|
| 600 |
+
|
| 601 |
+
```bash
|
| 602 |
+
# 从源码安装 bitsandbytes
|
| 603 |
+
# 克隆 bitsandbytes 仓库, Ascend NPU 目前在 multi-backend-refactor 中支持
|
| 604 |
+
git clone -b multi-backend-refactor https://github.com/bitsandbytes-foundation/bitsandbytes.git
|
| 605 |
+
cd bitsandbytes/
|
| 606 |
+
|
| 607 |
+
# 安装依赖
|
| 608 |
+
pip install -r requirements-dev.txt
|
| 609 |
+
|
| 610 |
+
# 安装编译工具依赖,该步骤在不同系统上命令有所不同,供参考
|
| 611 |
+
apt-get install -y build-essential cmake
|
| 612 |
+
|
| 613 |
+
# 编译 & 安装
|
| 614 |
+
cmake -DCOMPUTE_BACKEND=npu -S .
|
| 615 |
+
make
|
| 616 |
+
pip install .
|
| 617 |
+
```
|
| 618 |
+
|
| 619 |
+
2. 安装 transformers 的 main 分支版本。
|
| 620 |
+
|
| 621 |
+
```bash
|
| 622 |
+
git clone -b main https://github.com/huggingface/transformers.git
|
| 623 |
+
cd transformers
|
| 624 |
+
pip install .
|
| 625 |
+
```
|
| 626 |
+
|
| 627 |
+
3. 在训练参数中设置 `double_quantization: false`,可参考[示例](examples/train_qlora/qwen3_lora_sft_bnb_npu.yaml)。
|
| 628 |
+
|
| 629 |
+
</details>
|
| 630 |
+
|
| 631 |
+
### 数据准备
|
| 632 |
+
|
| 633 |
+
关于数据集文件的格式,请参考 [data/README_zh.md](data/README_zh.md) 的内容。你可以使用 HuggingFace / ModelScope / Modelers 上的数据集或加载本地数据集。
|
| 634 |
+
|
| 635 |
+
> [!NOTE]
|
| 636 |
+
> 使用自定义数据集时,请更新 `data/dataset_info.json` 文件。
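下面给出一个注册并使用本地数据集的最小流程示意,其中 `my_dataset` 与文件路径均为假设的示例,注册字段的含义请以 [data/README_zh.md](data/README_zh.md) 为准:

```bash
# 1. 将 alpaca 格式的数据文件放入 data/ 目录(路径仅为示意)
cp /path/to/my_dataset.json data/my_dataset.json

# 2. 在 data/dataset_info.json 中手动添加一条注册项,例如:
#    "my_dataset": { "file_name": "my_dataset.json" }

# 3. 在训练配置中通过 dataset: my_dataset 引用,或像下面这样在命令行覆盖
llamafactory-cli train examples/train_lora/qwen3_lora_sft.yaml dataset=my_dataset
```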
|
| 637 |
+
|
| 638 |
+
您也可以使用 **[Easy Dataset](https://github.com/ConardLi/easy-dataset)**、**[DataFlow](https://github.com/OpenDCAI/DataFlow)** 和 **[GraphGen](https://github.com/open-sciencelab/GraphGen)** 构建用于微调的合成数据。
|
| 639 |
+
|
| 640 |
+
### 快速开始
|
| 641 |
+
|
| 642 |
+
下面三行命令分别对 Qwen3-4B-Instruct 模型进行 LoRA **微调**、**推理**和**合并**。
|
| 643 |
+
|
| 644 |
+
```bash
|
| 645 |
+
llamafactory-cli train examples/train_lora/qwen3_lora_sft.yaml
|
| 646 |
+
llamafactory-cli chat examples/inference/qwen3_lora_sft.yaml
|
| 647 |
+
llamafactory-cli export examples/merge_lora/qwen3_lora_sft.yaml
|
| 648 |
+
```
|
| 649 |
+
|
| 650 |
+
高级用法请参考 [examples/README_zh.md](examples/README_zh.md)(包括多 GPU 微调)。
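与下文 vLLM 部署命令的写法一致,`llamafactory-cli` 支持在 yaml 路径后以 `key=value` 的形式覆盖配置项。下面是一个示意(参数取值仅为示例,可用参数请以示例配置文件与帮助信息为准):

```bash
# 在不修改 yaml 的情况下覆盖部分训练参数
llamafactory-cli train examples/train_lora/qwen3_lora_sft.yaml \
    output_dir=saves/qwen3-4b/lora/sft-test \
    learning_rate=2.0e-4 \
    num_train_epochs=3.0
```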
|
| 651 |
+
|
| 652 |
+
> [!TIP]
|
| 653 |
+
> 使用 `llamafactory-cli help` 显示帮助信息。
|
| 654 |
+
>
|
| 655 |
+
> 遇到报错请先看[常见���题](https://github.com/hiyouga/LLaMA-Factory/issues/4614)。
|
| 656 |
+
|
| 657 |
+
### LLaMA Board 可视化微调(由 [Gradio](https://github.com/gradio-app/gradio) 驱动)
|
| 658 |
+
|
| 659 |
+
```bash
|
| 660 |
+
llamafactory-cli webui
|
| 661 |
+
```
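如需修改 Web UI 的监听地址或端口,可以尝试通过 Gradio 相关环境变量控制(以下仅为示意,实际支持的变量请以框架文档为准):

```bash
# 指定监听地址与端口后启动 Web UI
GRADIO_SERVER_NAME=0.0.0.0 GRADIO_SERVER_PORT=7860 llamafactory-cli webui
```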
|
| 662 |
+
|
| 663 |
+
### LLaMA Factory Online 在线微调
|
| 664 |
+
|
| 665 |
+
详情阅读该[文档](https://docs.llamafactory.com.cn/docs/documents/quickstart/getstarted/?utm_source=LLaMA-Factory)。
|
| 666 |
+
|
| 667 |
+
### 构建 Docker
|
| 668 |
+
|
| 669 |
+
CUDA 用户:
|
| 670 |
+
|
| 671 |
+
```bash
|
| 672 |
+
cd docker/docker-cuda/
|
| 673 |
+
docker compose up -d
|
| 674 |
+
docker compose exec llamafactory bash
|
| 675 |
+
```
|
| 676 |
+
|
| 677 |
+
昇腾 NPU 用户:
|
| 678 |
+
|
| 679 |
+
```bash
|
| 680 |
+
cd docker/docker-npu/
|
| 681 |
+
docker compose up -d
|
| 682 |
+
docker compose exec llamafactory bash
|
| 683 |
+
```
|
| 684 |
+
|
| 685 |
+
AMD ROCm 用户:
|
| 686 |
+
|
| 687 |
+
```bash
|
| 688 |
+
cd docker/docker-rocm/
|
| 689 |
+
docker compose up -d
|
| 690 |
+
docker compose exec llamafactory bash
|
| 691 |
+
```
|
| 692 |
+
|
| 693 |
+
<details><summary>不使用 Docker Compose 构建</summary>
|
| 694 |
+
|
| 695 |
+
CUDA 用户:
|
| 696 |
+
|
| 697 |
+
```bash
|
| 698 |
+
docker build -f ./docker/docker-cuda/Dockerfile \
|
| 699 |
+
--build-arg PIP_INDEX=https://pypi.org/simple \
|
| 700 |
+
--build-arg EXTRAS=metrics \
|
| 701 |
+
-t llamafactory:latest .
|
| 702 |
+
|
| 703 |
+
docker run -dit --ipc=host --gpus=all \
|
| 704 |
+
-p 7860:7860 \
|
| 705 |
+
-p 8000:8000 \
|
| 706 |
+
--name llamafactory \
|
| 707 |
+
llamafactory:latest
|
| 708 |
+
|
| 709 |
+
docker exec -it llamafactory bash
|
| 710 |
+
```
|
| 711 |
+
|
| 712 |
+
昇腾 NPU 用户:
|
| 713 |
+
|
| 714 |
+
```bash
|
| 715 |
+
docker build -f ./docker/docker-npu/Dockerfile \
|
| 716 |
+
--build-arg PIP_INDEX=https://pypi.org/simple \
|
| 717 |
+
--build-arg EXTRAS=torch-npu,metrics \
|
| 718 |
+
-t llamafactory:latest .
|
| 719 |
+
|
| 720 |
+
docker run -dit --ipc=host \
|
| 721 |
+
-v /usr/local/dcmi:/usr/local/dcmi \
|
| 722 |
+
-v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
|
| 723 |
+
-v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
|
| 724 |
+
-v /etc/ascend_install.info:/etc/ascend_install.info \
|
| 725 |
+
-p 7860:7860 \
|
| 726 |
+
-p 8000:8000 \
|
| 727 |
+
--device /dev/davinci0 \
|
| 728 |
+
--device /dev/davinci_manager \
|
| 729 |
+
--device /dev/devmm_svm \
|
| 730 |
+
--device /dev/hisi_hdc \
|
| 731 |
+
--name llamafactory \
|
| 732 |
+
llamafactory:latest
|
| 733 |
+
|
| 734 |
+
docker exec -it llamafactory bash
|
| 735 |
+
```
|
| 736 |
+
|
| 737 |
+
AMD ROCm 用户:
|
| 738 |
+
|
| 739 |
+
```bash
|
| 740 |
+
docker build -f ./docker/docker-rocm/Dockerfile \
|
| 741 |
+
--build-arg PIP_INDEX=https://pypi.org/simple \
|
| 742 |
+
--build-arg EXTRAS=metrics \
|
| 743 |
+
-t llamafactory:latest .
|
| 744 |
+
|
| 745 |
+
docker run -dit --ipc=host \
|
| 746 |
+
-p 7860:7860 \
|
| 747 |
+
-p 8000:8000 \
|
| 748 |
+
--device /dev/kfd \
|
| 749 |
+
--device /dev/dri \
|
| 750 |
+
--name llamafactory \
|
| 751 |
+
llamafactory:latest
|
| 752 |
+
|
| 753 |
+
docker exec -it llamafactory bash
|
| 754 |
+
```
|
| 755 |
+
|
| 756 |
+
</details>
|
| 757 |
+
|
| 758 |
+
<details><summary>使用数据卷</summary>
|
| 759 |
+
|
| 760 |
+
您可以通过移除 Dockerfile 中 `VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]` 的注释来使用数据卷。
|
| 761 |
+
|
| 762 |
+
在启动 Docker 容器时使用参数 `-v ./hf_cache:/root/.cache/huggingface` 来挂载数据卷。各个数据卷的含义如下,完整的启动命令示例见列表后。
|
| 763 |
+
|
| 764 |
+
- `hf_cache`:使用宿主机的 Hugging Face 缓存文件夹。
|
| 765 |
+
- `shared_data`:宿主机中存放数据集的文件夹路径。
|
| 766 |
+
- `output`:将导出目录设置为该路径后,即可在宿主机中访问导出后的模型。
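以 CUDA 镜像为例,挂载上述三个数据卷的启动命令示意如下(宿主机路径仅为示例,可按需调整):

```bash
docker run -dit --ipc=host --gpus=all \
    -v ./hf_cache:/root/.cache/huggingface \
    -v ./shared_data:/app/shared_data \
    -v ./output:/app/output \
    -p 7860:7860 \
    -p 8000:8000 \
    --name llamafactory \
    llamafactory:latest
```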
|
| 767 |
+
|
| 768 |
+
</details>
|
| 769 |
+
|
| 770 |
+
### 利用 vLLM 部署 OpenAI API
|
| 771 |
+
|
| 772 |
+
```bash
|
| 773 |
+
API_PORT=8000 llamafactory-cli api examples/inference/qwen3.yaml infer_backend=vllm vllm_enforce_eager=true
|
| 774 |
+
```
|
| 775 |
+
|
| 776 |
+
> [!TIP]
|
| 777 |
+
> API 文档请查阅[这里](https://platform.openai.com/docs/api-reference/chat/create)。
|
| 778 |
+
>
|
| 779 |
+
> 示例:[图像理解](scripts/api_example/test_image.py) | [工具调用](scripts/api_example/test_toolcall.py)
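服务启动后,可使用任意 OpenAI 风格的客户端访问。下面是一个使用 curl 的最小请求示意(其中 `model` 字段为假设值,实际以服务端加载的模型为准):

```bash
curl http://localhost:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
        "model": "Qwen3-4B-Instruct",
        "messages": [{"role": "user", "content": "你好,请简单介绍一下自己。"}]
    }'
```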
|
| 780 |
+
|
| 781 |
+
### 从魔搭社区下载
|
| 782 |
+
|
| 783 |
+
如果您在 Hugging Face 模型和数据集的下载中遇到了问题,可以通过下述方法使用魔搭社区。
|
| 784 |
+
|
| 785 |
+
```bash
|
| 786 |
+
export USE_MODELSCOPE_HUB=1 # Windows 使用 `set USE_MODELSCOPE_HUB=1`
|
| 787 |
+
```
|
| 788 |
+
|
| 789 |
+
将 `model_name_or_path` 设置为模型 ID 来加载对应的模型。在[魔搭社区](https://modelscope.cn/models)查看所有可用的模型,例如 `LLM-Research/Meta-Llama-3-8B-Instruct`。
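例如,设置环境变量后即可直接使用魔搭社区的模型 ID 启动对话(以下配置文件路径与参数仅为示意):

```bash
export USE_MODELSCOPE_HUB=1
# 将 model_name_or_path 覆盖为魔搭社区的模型 ID
llamafactory-cli chat examples/inference/llama3.yaml \
    model_name_or_path=LLM-Research/Meta-Llama-3-8B-Instruct
```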
|
| 790 |
+
|
| 791 |
+
### 从魔乐社区下载
|
| 792 |
+
|
| 793 |
+
您也可以通过下述方法,使用魔乐社区下载数据集和模型。
|
| 794 |
+
|
| 795 |
+
```bash
|
| 796 |
+
export USE_OPENMIND_HUB=1 # Windows 使用 `set USE_OPENMIND_HUB=1`
|
| 797 |
+
```
|
| 798 |
+
|
| 799 |
+
将 `model_name_or_path` 设置为模型 ID 来加载对应的模型。在[魔乐社区](https://modelers.cn/models)查看所有可用的模型,例如 `TeleAI/TeleChat-7B-pt`。
|
| 800 |
+
|
| 801 |
+
### 使用 W&B 面板
|
| 802 |
+
|
| 803 |
+
若要使用 [Weights & Biases](https://wandb.ai) 记录实验数据,请在 yaml 文件中添加下面的参数。
|
| 804 |
+
|
| 805 |
+
```yaml
|
| 806 |
+
report_to: wandb
|
| 807 |
+
run_name: test_run # 可选
|
| 808 |
+
```
|
| 809 |
+
|
| 810 |
+
在启动训练任务时,将 `WANDB_API_KEY` 设置为[密钥](https://wandb.ai/authorize)来登录 W&B 账户。
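例如,可以在启动命令前以环境变量形式传入密钥(命令仅为示意,`<your_api_key>` 为占位符):

```bash
WANDB_API_KEY=<your_api_key> llamafactory-cli train examples/train_lora/qwen3_lora_sft.yaml
```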
|
| 811 |
+
|
| 812 |
+
### 使用 SwanLab 面板
|
| 813 |
+
|
| 814 |
+
若要使用 [SwanLab](https://github.com/SwanHubX/SwanLab) 记录实验数据,请在 yaml 文件中添加下面的参数。
|
| 815 |
+
|
| 816 |
+
```yaml
|
| 817 |
+
use_swanlab: true
|
| 818 |
+
swanlab_run_name: test_run # 可选
|
| 819 |
+
```
|
| 820 |
+
|
| 821 |
+
在启动训练任务时,登录 SwanLab 账户有以下三种方式(命令示例见列表后):
|
| 822 |
+
|
| 823 |
+
方式一:在 yaml 文件中添加 `swanlab_api_key=<your_api_key>` ,并设置为你的 [API 密钥](https://swanlab.cn/settings)。
|
| 824 |
+
方式二:将环境变量 `SWANLAB_API_KEY` 设置为你的 [API 密钥](https://swanlab.cn/settings)。
|
| 825 |
+
方式三:启动前使用 `swanlab login` 命令完成登录。
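以方式二为例(命令仅为示意,`<your_api_key>` 为占位符):

```bash
SWANLAB_API_KEY=<your_api_key> llamafactory-cli train examples/train_lora/qwen3_lora_sft.yaml
```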
|
| 826 |
+
|
| 827 |
+
## 使用了 LLaMA Factory 的项目
|
| 828 |
+
|
| 829 |
+
如果您有项目希望添加至下述列表,请通过邮件联系或者创建一个 PR。
|
| 830 |
+
|
| 831 |
+
<details><summary>点击显示</summary>
|
| 832 |
+
|
| 833 |
+
1. Wang et al. ESRL: Efficient Sampling-based Reinforcement Learning for Sequence Generation. 2023. [[arxiv]](https://arxiv.org/abs/2308.02223)
|
| 834 |
+
1. Yu et al. Open, Closed, or Small Language Models for Text Classification? 2023. [[arxiv]](https://arxiv.org/abs/2308.10092)
|
| 835 |
+
1. Wang et al. UbiPhysio: Support Daily Functioning, Fitness, and Rehabilitation with Action Understanding and Feedback in Natural Language. 2023. [[arxiv]](https://arxiv.org/abs/2308.10526)
|
| 836 |
+
1. Luceri et al. Leveraging Large Language Models to Detect Influence Campaigns in Social Media. 2023. [[arxiv]](https://arxiv.org/abs/2311.07816)
|
| 837 |
+
1. Zhang et al. Alleviating Hallucinations of Large Language Models through Induced Hallucinations. 2023. [[arxiv]](https://arxiv.org/abs/2312.15710)
|
| 838 |
+
1. Wang et al. Know Your Needs Better: Towards Structured Understanding of Marketer Demands with Analogical Reasoning Augmented LLMs. KDD 2024. [[arxiv]](https://arxiv.org/abs/2401.04319)
|
| 839 |
+
1. Wang et al. CANDLE: Iterative Conceptualization and Instantiation Distillation from Large Language Models for Commonsense Reasoning. ACL 2024. [[arxiv]](https://arxiv.org/abs/2401.07286)
|
| 840 |
+
1. Choi et al. FACT-GPT: Fact-Checking Augmentation via Claim Matching with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2402.05904)
|
| 841 |
+
1. Zhang et al. AutoMathText: Autonomous Data Selection with Language Models for Mathematical Texts. 2024. [[arxiv]](https://arxiv.org/abs/2402.07625)
|
| 842 |
+
1. Lyu et al. KnowTuning: Knowledge-aware Fine-tuning for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11176)
|
| 843 |
+
1. Yang et al. LaCo: Large Language Model Pruning via Layer Collaps. 2024. [[arxiv]](https://arxiv.org/abs/2402.11187)
|
| 844 |
+
1. Bhardwaj et al. Language Models are Homer Simpson! Safety Re-Alignment of Fine-tuned Language Models through Task Arithmetic. 2024. [[arxiv]](https://arxiv.org/abs/2402.11746)
|
| 845 |
+
1. Yang et al. Enhancing Empathetic Response Generation by Augmenting LLMs with Small-scale Empathetic Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11801)
|
| 846 |
+
1. Yi et al. Generation Meets Verification: Accelerating Large Language Model Inference with Smart Parallel Auto-Correct Decoding. ACL 2024 Findings. [[arxiv]](https://arxiv.org/abs/2402.11809)
|
| 847 |
+
1. Cao et al. Head-wise Shareable Attention for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11819)
|
| 848 |
+
1. Zhang et al. Enhancing Multilingual Capabilities of Large Language Models through Self-Distillation from Resource-Rich Languages. 2024. [[arxiv]](https://arxiv.org/abs/2402.12204)
|
| 849 |
+
1. Kim et al. Efficient and Effective Vocabulary Expansion Towards Multilingual Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.14714)
|
| 850 |
+
1. Yu et al. KIEval: A Knowledge-grounded Interactive Evaluation Framework for Large Language Models. ACL 2024. [[arxiv]](https://arxiv.org/abs/2402.15043)
|
| 851 |
+
1. Huang et al. Key-Point-Driven Data Synthesis with its Enhancement on Mathematical Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2403.02333)
|
| 852 |
+
1. Duan et al. Negating Negatives: Alignment without Human Positive Samples via Distributional Dispreference Optimization. 2024. [[arxiv]](https://arxiv.org/abs/2403.03419)
|
| 853 |
+
1. Xie and Schwertfeger. Empowering Robotics with Large Language Models: osmAG Map Comprehension with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2403.08228)
|
| 854 |
1. Wu et al. Large Language Models are Parallel Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2403.09073)
1. Zhang et al. EDT: Improving Large Language Models' Generation by Entropy-based Dynamic Temperature Sampling. 2024. [[arxiv]](https://arxiv.org/abs/2403.14541)
1. Weller et al. FollowIR: Evaluating and Teaching Information Retrieval Models to Follow Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2403.15246)
1. Hongbin Na. CBT-LLM: A Chinese Large Language Model for Cognitive Behavioral Therapy-based Mental Health Question Answering. COLING 2024. [[arxiv]](https://arxiv.org/abs/2403.16008)
1. Zan et al. CodeS: Natural Language to Code Repository via Multi-Layer Sketch. 2024. [[arxiv]](https://arxiv.org/abs/2403.16443)
1. Liu et al. Extensive Self-Contrast Enables Feedback-Free Language Model Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2404.00604)
1. Luo et al. BAdam: A Memory Efficient Full Parameter Training Method for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.02827)
1. Du et al. Chinese Tiny LLM: Pretraining a Chinese-Centric Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2404.04167)
1. Ma et al. Parameter Efficient Quasi-Orthogonal Fine-Tuning via Givens Rotation. ICML 2024. [[arxiv]](https://arxiv.org/abs/2404.04316)
1. Liu et al. Dynamic Generation of Personalities with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.07084)
1. Shang et al. How Far Have We Gone in Stripped Binary Code Understanding Using Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.09836)
1. Huang et al. LLMTune: Accelerate Database Knob Tuning with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.11581)
1. Deng et al. Text-Tuple-Table: Towards Information Integration in Text-to-Table Generation via Global Tuple Extraction. 2024. [[arxiv]](https://arxiv.org/abs/2404.14215)
1. Acikgoz et al. Hippocrates: An Open-Source Framework for Advancing Large Language Models in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2404.16621)
1. Zhang et al. Small Language Models Need Strong Verifiers to Self-Correct Reasoning. ACL 2024 Findings. [[arxiv]](https://arxiv.org/abs/2404.17140)
1. Zhou et al. FREB-TQA: A Fine-Grained Robustness Evaluation Benchmark for Table Question Answering. NAACL 2024. [[arxiv]](https://arxiv.org/abs/2404.18585)
1. Xu et al. Large Language Models for Cyber Security: A Systematic Literature Review. 2024. [[arxiv]](https://arxiv.org/abs/2405.04760)
1. Dammu et al. "They are uncultured": Unveiling Covert Harms and Social Threats in LLM Generated Conversations. 2024. [[arxiv]](https://arxiv.org/abs/2405.05378)
1. Yi et al. A safety realignment framework via subspace-oriented model fusion for large language models. 2024. [[arxiv]](https://arxiv.org/abs/2405.09055)
1. Lou et al. SPO: Multi-Dimensional Preference Sequential Alignment With Implicit Reward Modeling. 2024. [[arxiv]](https://arxiv.org/abs/2405.12739)
1. Zhang et al. Getting More from Less: Large Language Models are Good Spontaneous Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2405.13816)
1. Zhang et al. TS-Align: A Teacher-Student Collaborative Framework for Scalable Iterative Finetuning of Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2405.20215)
1. Zihong Chen. Sentence Segmentation and Sentence Punctuation Based on XunziALLM. 2024. [[paper]](https://aclanthology.org/2024.lt4hala-1.30)
1. Gao et al. The Best of Both Worlds: Toward an Honest and Helpful Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2406.00380)
1. Wang and Song. MARS: Benchmarking the Metaphysical Reasoning Abilities of Language Models with a Multi-task Evaluation Dataset. 2024. [[arxiv]](https://arxiv.org/abs/2406.02106)
1. Hu et al. Computational Limits of Low-Rank Adaptation (LoRA) for Transformer-Based Models. 2024. [[arxiv]](https://arxiv.org/abs/2406.03136)
1. Ge et al. Time Sensitive Knowledge Editing through Efficient Finetuning. ACL 2024. [[arxiv]](https://arxiv.org/abs/2406.04496)
1. Tan et al. Peer Review as A Multi-Turn and Long-Context Dialogue with Role-Based Interactions. 2024. [[arxiv]](https://arxiv.org/abs/2406.05688)
1. Song et al. Turbo Sparse: Achieving LLM SOTA Performance with Minimal Activated Parameters. 2024. [[arxiv]](https://arxiv.org/abs/2406.05955)
1. Gu et al. RWKV-CLIP: A Robust Vision-Language Representation Learner. 2024. [[arxiv]](https://arxiv.org/abs/2406.06973)
1. Chen et al. Advancing Tool-Augmented Large Language Models: Integrating Insights from Errors in Inference Trees. 2024. [[arxiv]](https://arxiv.org/abs/2406.07115)
1. Zhu et al. Are Large Language Models Good Statisticians?. 2024. [[arxiv]](https://arxiv.org/abs/2406.07815)
1. Li et al. Know the Unknown: An Uncertainty-Sensitive Method for LLM Instruction Tuning. 2024. [[arxiv]](https://arxiv.org/abs/2406.10099)
1. Ding et al. IntentionQA: A Benchmark for Evaluating Purchase Intention Comprehension Abilities of Language Models in E-commerce. 2024. [[arxiv]](https://arxiv.org/abs/2406.10173)
1. He et al. COMMUNITY-CROSS-INSTRUCT: Unsupervised Instruction Generation for Aligning Large Language Models to Online Communities. 2024. [[arxiv]](https://arxiv.org/abs/2406.12074)
1. Lin et al. FVEL: Interactive Formal Verification Environment with Large Language Models via Theorem Proving. 2024. [[arxiv]](https://arxiv.org/abs/2406.14408)
1. Treutlein et al. Connecting the Dots: LLMs can Infer and Verbalize Latent Structure from Disparate Training Data. 2024. [[arxiv]](https://arxiv.org/abs/2406.14546)
1. Feng et al. SS-Bench: A Benchmark for Social Story Generation and Evaluation. 2024. [[arxiv]](https://arxiv.org/abs/2406.15695)
1. Feng et al. Self-Constructed Context Decompilation with Fined-grained Alignment Enhancement. 2024. [[arxiv]](https://arxiv.org/abs/2406.17233)
1. Liu et al. Large Language Models for Cuffless Blood Pressure Measurement From Wearable Biosignals. 2024. [[arxiv]](https://arxiv.org/abs/2406.18069)
1. Iyer et al. Exploring Very Low-Resource Translation with LLMs: The University of Edinburgh's Submission to AmericasNLP 2024 Translation Task. AmericasNLP 2024. [[paper]](https://aclanthology.org/2024.americasnlp-1.25)
1. Li et al. Calibrating LLMs with Preference Optimization on Thought Trees for Generating Rationale in Science Question Scoring. 2024. [[arxiv]](https://arxiv.org/abs/2406.19949)
1. Yang et al. Financial Knowledge Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2407.00365)
1. Lin et al. DogeRM: Equipping Reward Models with Domain Knowledge through Model Merging. 2024. [[arxiv]](https://arxiv.org/abs/2407.01470)
1. Bako et al. Evaluating the Semantic Profiling Abilities of LLMs for Natural Language Utterances in Data Visualization. 2024. [[arxiv]](https://arxiv.org/abs/2407.06129)
1. Huang et al. RoLoRA: Fine-tuning Rotated Outlier-free LLMs for Effective Weight-Activation Quantization. 2024. [[arxiv]](https://arxiv.org/abs/2407.08044)
1. Jiang et al. LLM-Collaboration on Automatic Science Journalism for the General Audience. 2024. [[arxiv]](https://arxiv.org/abs/2407.09756)
1. Inouye et al. Applied Auto-tuning on LoRA Hyperparameters. 2024. [[paper]](https://scholarcommons.scu.edu/cseng_senior/272/)
1. Qi et al. Research on Tibetan Tourism Viewpoints information generation system based on LLM. 2024. [[arxiv]](https://arxiv.org/abs/2407.13561)
1. Xu et al. Course-Correction: Safety Alignment Using Synthetic Preferences. 2024. [[arxiv]](https://arxiv.org/abs/2407.16637)
1. Sun et al. LAMBDA: A Large Model Based Data Agent. 2024. [[arxiv]](https://arxiv.org/abs/2407.17535)
1. Zhu et al. CollectiveSFT: Scaling Large Language Models for Chinese Medical Benchmark with Collective Instructions in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2407.19705)
1. Yu et al. Correcting Negative Bias in Large Language Models through Negative Attention Score Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2408.00137)
1. Xie et al. The Power of Personalized Datasets: Advancing Chinese Composition Writing for Elementary School through Targeted Model Fine-Tuning. IALP 2024. [[paper]](https://www.asianlp.sg/conferences/ialp2024/proceedings/papers/IALP2024_P055.pdf)
1. Liu et al. Instruct-Code-Llama: Improving Capabilities of Language Model in Competition Level Code Generation by Online Judge Feedback. ICIC 2024. [[paper]](https://link.springer.com/chapter/10.1007/978-981-97-5669-8_11)
1. Wang et al. Cybernetic Sentinels: Unveiling the Impact of Safety Data Selection on Model Security in Supervised Fine-Tuning. ICIC 2024. [[paper]](https://link.springer.com/chapter/10.1007/978-981-97-5669-8_23)
1. Xia et al. Understanding the Performance and Estimating the Cost of LLM Fine-Tuning. 2024. [[arxiv]](https://arxiv.org/abs/2408.04693)
1. Zeng et al. Perceive, Reflect, and Plan: Designing LLM Agent for Goal-Directed City Navigation without Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2408.04168)
1. Xia et al. Using Pre-trained Language Model for Accurate ESG Prediction. FinNLP 2024. [[paper]](https://aclanthology.org/2024.finnlp-2.1/)
1. Liang et al. I-SHEEP: Self-Alignment of LLM from Scratch through an Iterative Self-Enhancement Paradigm. 2024. [[arxiv]](https://arxiv.org/abs/2408.08072)
1. Bai et al. Aligning Large Language Model with Direct Multi-Preference Optimization for Recommendation. CIKM 2024. [[paper]](https://dl.acm.org/doi/10.1145/3627673.3679611)
1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: StarWhisper, a large language model for astronomy, fine-tuned from ChatGLM2-6B and Qwen-14B on astronomical data.
1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: DISC-LawLLM, a large language model for the Chinese legal domain, fine-tuned from Baichuan-13B and capable of legal reasoning and knowledge retrieval.
1. **[Sunsimiao](https://github.com/X-D-Lab/Sunsimiao)**: Sunsimiao, a Chinese medical large language model fine-tuned from Baichuan-7B and ChatGLM-6B on Chinese medical data.
1. **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: CareGPT, a medical large language model project fine-tuned from LLaMA2-7B and Baichuan-13B on Chinese medical data.
1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**: An MBTI personality model project that gives any LLM one of 16 distinct personality types through its datasets and training methods.
1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**: A large language model for generating Stable Diffusion prompts. [[demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt)
1. **[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**: A Chinese multimodal medical large language model fine-tuned from LLaVA-1.5-7B on Chinese multimodal medical data.
1. **[AutoRE](https://github.com/THUDM/AutoRE)**: A document-level relation extraction system based on large language models.
1. **[NVIDIA RTX AI Toolkit](https://github.com/NVIDIA/RTX-AI-Toolkit)**: A toolkit for fine-tuning large language models on Windows machines with NVIDIA RTX devices.
1. **[LazyLLM](https://github.com/LazyAGI/LazyLLM)**: A low-code development tool for building multi-agent LLM applications, supporting model fine-tuning based on LLaMA Factory.
1. **[RAG-Retrieval](https://github.com/NLPJCL/RAG-Retrieval)**: A full-pipeline codebase for fine-tuning, inference, and distillation of RAG retrieval models. [[blog]](https://zhuanlan.zhihu.com/p/987727357)
1. **[360-LLaMA-Factory](https://github.com/Qihoo360/360-LLaMA-Factory)**: A modified fork of the codebase that supports long-sequence SFT and DPO training via Ring Attention.
1. **[Sky-T1](https://novasky-ai.github.io/posts/sky-t1/)**: A low-cost o1-like long-reasoning model fine-tuned by NovaSky AI.
1. **[WeClone](https://github.com/xming521/WeClone)**: A one-stop solution for creating a digital avatar from chat history.

</details>

## License

The code in this repository is open-sourced under the [Apache-2.0](LICENSE) license.

When using the model weights, please follow the corresponding model licenses: [BLOOM](https://huggingface.co/spaces/bigscience/license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [GLM-4](https://huggingface.co/THUDM/glm-4-9b/blob/main/LICENSE) / [GPT-2](https://github.com/openai/gpt-2/blob/master/LICENSE) / [Granite](LICENSE) / [InternLM](https://github.com/InternLM/InternLM#license) / [Llama](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [Llama 2](https://ai.meta.com/llama/license/) / [Llama 3](https://llama.meta.com/llama3/license/) / [Llama 4](https://github.com/meta-llama/llama-models/blob/main/models/llama4/LICENSE) / [MiniCPM](https://github.com/OpenBMB/MiniCPM/blob/main/MiniCPM%20Model%20License.md) / [Mistral/Mixtral/Pixtral](LICENSE) / [Phi-3/Phi-4](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [StarCoder 2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [TeleChat2](https://huggingface.co/Tele-AI/telechat-7B/blob/main/TeleChat%E6%A8%A1%E5%9E%8B%E7%A4%BE%E5%8C%BA%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf) / [Yuan 2](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan)

## Citation

If you find this work helpful, please consider citing it as follows:

```bibtex
@inproceedings{zheng2024llamafactory,
  title={LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models},
  author={Yaowei Zheng and Richong Zhang and Junhao Zhang and Yanhan Ye and Zheyan Luo and Zhangchi Feng and Yongqiang Ma},
  booktitle={Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)},
  address={Bangkok, Thailand},
  publisher={Association for Computational Linguistics},
  year={2024},
  url={http://arxiv.org/abs/2403.13372}
}
```

## Acknowledgement

This project benefits from [PEFT](https://github.com/huggingface/peft), [TRL](https://github.com/huggingface/trl), [QLoRA](https://github.com/artidoro/qlora), and [FastChat](https://github.com/lm-sys/FastChat). Many thanks to their authors for their contributions.

## Star History


LlamaFactory/pyproject.toml
ADDED
@@ -0,0 +1,149 @@
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "llamafactory"
dynamic = ["version"]
description = "Unified Efficient Fine-Tuning of 100+ LLMs"
readme = "README.md"
license = "Apache-2.0"
requires-python = ">=3.11.0"
authors = [
    { name = "hiyouga", email = "hiyouga@buaa.edu.cn" }
]
keywords = [
    "AI",
    "LLM",
    "GPT",
    "ChatGPT",
    "Llama",
    "Transformer",
    "DeepSeek",
    "Pytorch"
]
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "Intended Audience :: Education",
    "Intended Audience :: Science/Research",
    "License :: OSI Approved :: Apache Software License",
    "Operating System :: OS Independent",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Programming Language :: Python :: 3.13",
    "Topic :: Scientific/Engineering :: Artificial Intelligence"
]
dependencies = [
    # core deps
    "torch>=2.4.0",
    "torchvision>=0.19.0",
    "torchaudio>=2.4.0",
    "transformers>=4.51.0,<=5.0.0,!=4.52.0,!=4.57.0",
    "datasets>=2.16.0,<=4.0.0",
    "accelerate>=1.3.0,<=1.11.0",
    "peft>=0.18.0,<=0.18.1",
    "trl>=0.18.0,<=0.24.0",
    "torchdata>=0.10.0,<=0.11.0",
    # gui
    "gradio>=4.38.0,<=5.50.0",
    "matplotlib>=3.7.0",
    "tyro<0.9.0",
    # ops
    "einops",
    "numpy",
    "pandas",
    "scipy",
    # model and tokenizer
    "sentencepiece",
    "tiktoken",
    "modelscope",
    "hf-transfer",
    "safetensors",
    # python
    "av>=10.0.0,<=16.0.0",
    "fire",
    "omegaconf",
    "packaging",
    "protobuf",
    "pyyaml",
    "pydantic",
    # api
    "uvicorn",
    "fastapi",
    "sse-starlette",
]

[project.scripts]
llamafactory-cli = "llamafactory.cli:main"
lmf = "llamafactory.cli:main"

[project.urls]
Homepage = "https://github.com/hiyouga/LLaMA-Factory"
Repository = "https://github.com/hiyouga/LLaMA-Factory"

[tool.hatch.build.targets.wheel]
packages = ["src/llamafactory"]

[tool.hatch.version]
path = "src/llamafactory/extras/env.py"
pattern = "VERSION = \"(?P<version>[^\"]+)\""

[tool.ruff]
target-version = "py311"
line-length = 119
indent-width = 4

[tool.ruff.lint]
ignore = [
    "C408", # collection
    "C901", # complex
    "E501", # line too long
    "E731", # lambda function
    "E741", # ambiguous var name
    "UP007", # no upgrade union
    "UP045", # no upgrade optional
    "D100", # no doc public module
    "D101", # no doc public class
    "D102", # no doc public method
    "D103", # no doc public function
    "D104", # no doc public package
    "D105", # no doc magic method
    "D107", # no doc __init__
]
extend-select = [
    "C", # complexity
    "E", # error
    "F", # pyflakes
    "I", # isort
    "W", # warning
    "UP", # pyupgrade
    "D", # pydocstyle
    "PT009", # pytest assert
    "RUF022", # sort __all__
]

[tool.ruff.lint.isort]
lines-after-imports = 2
known-first-party = ["llamafactory"]
known-third-party = [
    "accelerate",
    "datasets",
    "gradio",
    "numpy",
    "peft",
    "torch",
    "transformers",
    "trl",
]

[tool.ruff.lint.pydocstyle]
convention = "google"

[tool.ruff.format]
quote-style = "double"
indent-style = "space"
docstring-code-format = true
skip-magic-trailing-comma = false
line-ending = "auto"
LlamaFactory/wandb/debug-cli.root.log
ADDED
File without changes
LlamaFactory/wandb/debug-internal.log
ADDED
@@ -0,0 +1,8 @@
{"time":"2026-02-11T03:55:40.10138239Z","level":"INFO","msg":"stream: starting","core version":"0.24.2"}
{"time":"2026-02-11T03:55:40.456868425Z","level":"INFO","msg":"stream: created new stream","id":"7vgn4sn5"}
{"time":"2026-02-11T03:55:40.457669418Z","level":"INFO","msg":"handler: started","stream_id":"7vgn4sn5"}
{"time":"2026-02-11T03:55:40.460178531Z","level":"INFO","msg":"stream: started","id":"7vgn4sn5"}
{"time":"2026-02-11T03:55:40.460254263Z","level":"INFO","msg":"writer: started","stream_id":"7vgn4sn5"}
{"time":"2026-02-11T03:55:40.460285384Z","level":"INFO","msg":"sender: started","stream_id":"7vgn4sn5"}
{"time":"2026-02-11T23:24:56.274798427Z","level":"INFO","msg":"api: retrying HTTP error","status":502,"url":"https://api.wandb.ai/files/markmochi200-linksome-ai/llamafactory/7vgn4sn5/file_stream","body":"\n<html><head>\n<meta http-equiv=\"content-type\" content=\"text/html;charset=utf-8\">\n<title>502 Server Error</title>\n</head>\n<body text=#000000 bgcolor=#ffffff>\n<h1>Error: Server Error</h1>\n<h2>The server encountered a temporary error and could not complete your request.<p>Please try again in 30 seconds.</h2>\n<h2></h2>\n</body></html>\n"}
{"time":"2026-02-12T10:15:26.287337035Z","level":"INFO","msg":"stream: closing","id":"7vgn4sn5"}
LlamaFactory/wandb/debug.log
ADDED
@@ -0,0 +1,350 @@
2026-02-11 03:55:39,847 INFO MainThread:874 [wandb_setup.py:_flush():81] Current SDK version is 0.24.2
2026-02-11 03:55:39,847 INFO MainThread:874 [wandb_setup.py:_flush():81] Configure stats pid to 874
2026-02-11 03:55:39,848 INFO MainThread:874 [wandb_setup.py:_flush():81] Loading settings from environment variables
2026-02-11 03:55:39,849 INFO MainThread:874 [wandb_init.py:setup_run_log_directory():717] Logging user logs to /workspace/LlamaFactory/wandb/run-20260211_035539-7vgn4sn5/logs/debug.log
2026-02-11 03:55:39,850 INFO MainThread:874 [wandb_init.py:setup_run_log_directory():718] Logging internal logs to /workspace/LlamaFactory/wandb/run-20260211_035539-7vgn4sn5/logs/debug-internal.log
2026-02-11 03:55:39,851 INFO MainThread:874 [wandb_init.py:init():844] calling init triggers
2026-02-11 03:55:39,852 INFO MainThread:874 [wandb_init.py:init():849] wandb.init called with sweep_config: {}
config: {'_wandb': {}}
2026-02-11 03:55:39,852 INFO MainThread:874 [wandb_init.py:init():892] starting backend
2026-02-11 03:55:40,085 INFO MainThread:874 [wandb_init.py:init():895] sending inform_init request
2026-02-11 03:55:40,096 INFO MainThread:874 [wandb_init.py:init():903] backend started and connected
2026-02-11 03:55:40,100 INFO MainThread:874 [wandb_init.py:init():973] updated telemetry
2026-02-11 03:55:40,181 INFO MainThread:874 [wandb_init.py:init():997] communicating run to backend with 90.0 second timeout
2026-02-11 03:55:40,895 INFO MainThread:874 [wandb_init.py:init():1042] starting run threads in backend
2026-02-11 03:55:41,100 INFO MainThread:874 [wandb_run.py:_console_start():2529] atexit reg
2026-02-11 03:55:41,100 INFO MainThread:874 [wandb_run.py:_redirect():2377] redirect: wrap_raw
2026-02-11 03:55:41,101 INFO MainThread:874 [wandb_run.py:_redirect():2446] Wrapping output streams.
2026-02-11 03:55:41,101 INFO MainThread:874 [wandb_run.py:_redirect():2469] Redirects installed.
2026-02-11 03:55:41,110 INFO MainThread:874 [wandb_init.py:init():1082] run started, returning control to user process
2026-02-11 03:55:41,113 INFO MainThread:874 [wandb_run.py:_config_callback():1404] config_cb None None {'peft_config': {'default': {'task_type': 'CAUSAL_LM', 'peft_type': 'LORA', 'auto_mapping': None, 'peft_version': '0.18.1', 'base_model_name_or_path': '/workspace/Qwen/Qwen3-8B-Base', 'revision': None, 'inference_mode': False, 'r': 32, 'target_modules': ['q_proj', 'v_proj', 'down_proj', 'up_proj', 'gate_proj', 'k_proj', 'o_proj'], 'exclude_modules': None, 'lora_alpha': 64, 'lora_dropout': 0.03, 'fan_in_fan_out': False, 'bias': 'none', 'use_rslora': False, 'modules_to_save': None, 'init_lora_weights': True, 'layers_to_transform': None, 'layers_pattern': None, 'rank_pattern': {}, 'alpha_pattern': {}, 'megatron_config': None, 'megatron_core': 'megatron.core', 'trainable_token_indices': None, 'loftq_config': {}, 'eva_config': None, 'corda_config': None, 'use_dora': False, 'alora_invocation_tokens': None, 'use_qalora': False, 'qalora_group_size': 16, 'layer_replication': None, 'runtime_config': {'ephemeral_gpu_offload': False}, 'lora_bias': False, 'target_parameters': None, 'arrow_config': None, 'ensure_weight_tying': False}}, 'vocab_size': 151936, 'max_position_embeddings': 32768, 'hidden_size': 4096, 'intermediate_size': 12288, 'num_hidden_layers': 36, 'num_attention_heads': 32, 'use_sliding_window': False, 'sliding_window': None, 'max_window_layers': 36, 'num_key_value_heads': 8, 'head_dim': 128, 'hidden_act': 'silu', 'initializer_range': 0.02, 'rms_norm_eps': 1e-06, 'use_cache': False, 'attention_bias': False, 'attention_dropout': 0.0, 'layer_types': ['full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention', 'full_attention'], 'pad_token_id': 151643, 'bos_token_id': None, 'eos_token_id': 151645, 'tie_word_embeddings': False, 'rope_parameters': {'rope_theta': 1000000, 'rope_type': 'default'}, 'return_dict': True, 'output_hidden_states': False, 'dtype': 'bfloat16', 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'architectures': ['Qwen3ForCausalLM'], 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'problem_type': None, '_name_or_path': '/workspace/Qwen/Qwen3-8B-Base', 'transformers_version': '5.0.0', 'model_type': 'qwen3', 'output_attentions': False, 'output_dir': '/workspace/v127rc_exp2/B_mup', 'do_train': True, 'do_eval': False, 'do_predict': False, 'eval_strategy': 'no', 'prediction_loss_only': False, 'per_device_train_batch_size': 1, 'per_device_eval_batch_size': 8, 'gradient_accumulation_steps': 8, 'eval_accumulation_steps': None, 'eval_delay': 0, 'torch_empty_cache_steps': None, 'learning_rate': 0.0001, 'weight_decay': 0.01, 'adam_beta1': 0.9, 'adam_beta2': 0.95, 'adam_epsilon': 1e-08, 'max_grad_norm': 1, 'num_train_epochs': 10, 'max_steps': -1, 'lr_scheduler_type': 'cosine', 'lr_scheduler_kwargs': None, 'warmup_ratio': 0.01, 'warmup_steps': 0.01, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': None, 'logging_strategy': 'steps', 
'logging_first_step': False, 'logging_steps': 1, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 100, 'save_total_limit': None, 'enable_jit_checkpoint': False, 'save_on_each_node': False, 'save_only_model': True, 'restore_callback_states_from_checkpoint': False, 'use_cpu': False, 'seed': 42, 'data_seed': None, 'bf16': True, 'fp16': False, 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': -1, 'ddp_backend': None, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': None, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'run_name': None, 'disable_tqdm': False, 'remove_unused_columns': False, 'label_names': ['labels'], 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}, 'parallelism_config': None, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['wandb'], 'project': 'huggingface', 'trackio_space_id': 'trackio', 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'push_to_hub': False, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': None, 'hub_always_push': False, 'hub_revision': None, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_for_metrics': [], 'eval_do_concat_batches': True, 'auto_find_batch_size': False, 'full_determinism': False, 'ddp_timeout': 180000000, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'include_num_input_tokens_seen': 'all', 'neftune_noise_alpha': None, 'optim_target_modules': None, 'batch_eval_metrics': False, 'eval_on_start': False, 'use_liger_kernel': False, 'liger_kernel_config': None, 'eval_use_gather_object': False, 'average_tokens_across_devices': True, 'sortish_sampler': False, 'predict_with_generate': False, 'generation_max_length': 2047, 'generation_num_beams': None, 'generation_config': None, 'ray_num_workers': 1, 'ray_init_kwargs': None, 'master_addr': None, 'master_port': None, 'fp8': False, 'fp8_backend': 'auto', 'fp8_enable_fsdp_float8_all_gather': False, 'overwrite_output_dir': False}
2026-02-11 03:55:41,128 INFO MainThread:874 [wandb_config.py:__setitem__():154] [no run ID] config set model/num_parameters = 8278029312 - <bound method Run._config_callback of <wandb.sdk.wandb_run.Run object at 0x7c8cb14f5050>>
2026-02-11 03:55:41,128 INFO MainThread:874 [wandb_run.py:_config_callback():1404] config_cb model/num_parameters 8278029312 None
2026-02-11 03:55:41,133 INFO MainThread:874 [wandb_run.py:_config_callback():1404] config_cb None None {'model_args': {'model_name_or_path': '/workspace/Qwen/Qwen3-8B-Base', 'adapter_name_or_path': None, 'adapter_folder': None, 'cache_dir': None, 'use_fast_tokenizer': True, 'resize_vocab': False, 'split_special_tokens': False, 'add_tokens': None, 'add_special_tokens': None, 'new_special_tokens_config': None, 'init_special_tokens': 'noise_init', 'model_revision': 'main', 'low_cpu_mem_usage': True, 'rope_scaling': None, 'flash_attn': 'auto', 'shift_attn': False, 'mixture_of_depths': None, 'use_unsloth': False, 'use_unsloth_gc': False, 'enable_liger_kernel': False, 'moe_aux_loss_coef': None, 'disable_gradient_checkpointing': False, 'use_reentrant_gc': True, 'upcast_layernorm': False, 'upcast_lmhead_output': False, 'train_from_scratch': False, 'infer_backend': 'HF', 'offload_folder': 'offload', 'use_kv_cache': True, 'use_v1_kernels': False, 'infer_dtype': 'auto', 'hf_hub_token': '<HF_HUB_TOKEN>', 'ms_hub_token': '<MS_HUB_TOKEN>', 'om_hub_token': '<OM_HUB_TOKEN>', 'print_param_status': False, 'trust_remote_code': True, 'quantization_method': 'BNB', 'quantization_bit': None, 'quantization_type': 'nf4', 'double_quantization': True, 'quantization_device_map': None, 'image_max_pixels': 589824, 'image_min_pixels': 1024, 'image_do_pan_and_scan': False, 'crop_to_patches': False, 'video_max_pixels': 65536, 'video_min_pixels': 256, 'video_fps': 2.0, 'video_maxlen': 128, 'use_audio_in_video': False, 'audio_sampling_rate': 16000, 'export_dir': None, 'export_size': 5, 'export_device': 'cpu', 'export_quantization_bit': None, 'export_quantization_dataset': None, 'export_quantization_nsamples': 128, 'export_quantization_maxlen': 1024, 'export_legacy_format': False, 'export_hub_model_id': None, 'use_kt': False, 'kt_optimize_rule': None, 'cpu_infer': 32, 'chunk_size': 8192, 'mode': 'normal', 'kt_maxlen': 4096, 'kt_use_cuda_graph': True, 'kt_mode': 'normal', 'kt_force_think': False, 'vllm_maxlen': 4096, 'vllm_gpu_util': 0.7, 'vllm_enforce_eager': False, 'vllm_max_lora_rank': 32, 'vllm_config': None, 'sglang_maxlen': 4096, 'sglang_mem_fraction': 0.7, 'sglang_tp_size': -1, 'sglang_config': None, 'sglang_lora_backend': 'triton', 'compute_dtype': 'torch.bfloat16', 'device_map': {'': 'cuda:0'}, 'model_max_length': 2047, 'block_diag_attn': False}, 'data_args': {'template': 'qwen3_nothink', 'dataset': ['Markie_Voss_t34_d300_r0'], 'eval_dataset': None, 'dataset_dir': '/workspace/LlamaFactory/data', 'media_dir': '/workspace/LlamaFactory/data', 'cutoff_len': 2047, 'train_on_prompt': False, 'mask_history': False, 'streaming': False, 'buffer_size': 16384, 'mix_strategy': 'concat', 'interleave_probs': None, 'overwrite_cache': False, 'preprocessing_batch_size': 1000, 'preprocessing_num_workers': 16, 'max_samples': 100000000, 'eval_num_beams': None, 'ignore_pad_token_for_loss': True, 'val_size': 0.0, 'eval_on_each_dataset': False, 'packing': True, 'neat_packing': False, 'tool_format': None, 'default_system': None, 'enable_thinking': False, 'tokenized_path': None, 'data_shared_file_system': False}, 'finetuning_args': {'freeze_trainable_layers': 2, 'freeze_trainable_modules': ['all'], 'freeze_extra_modules': None, 'additional_target': None, 'module_dropout': 0.0, 'oft_rank': 0, 'oft_block_size': 32, 'oft_target': ['all'], 'create_new_adapter': False, 'lora_alpha': 64, 'lora_dropout': 0.03, 'lora_rank': 32, 'lora_target': ['all'], 'loraplus_lr_ratio': None, 'loraplus_lr_embedding': 1e-06, 'use_rslora': False, 'use_dora': False, 
'pissa_init': False, 'pissa_iter': 16, 'pissa_convert': False, 'pref_beta': 0.1, 'pref_ftx': 0.0, 'pref_bco_weight': 0.0, 'pref_loss': 'sigmoid', 'dpo_label_smoothing': 0.0, 'kto_chosen_weight': 1.0, 'kto_rejected_weight': 1.0, 'simpo_gamma': 0.5, 'ppo_buffer_size': 1, 'ppo_epochs': 4, 'ppo_score_norm': False, 'ppo_target': 6.0, 'ppo_whiten_rewards': False, 'ref_model': None, 'ref_model_adapters': None, 'ref_model_quantization_bit': None, 'reward_model': None, 'reward_model_adapters': None, 'reward_model_quantization_bit': None, 'reward_model_type': 'lora', 'ld_alpha': None, 'use_galore': False, 'galore_target': ['all'], 'galore_rank': 16, 'galore_update_interval': 200, 'galore_scale': 2.0, 'galore_proj_type': 'std', 'galore_layerwise': False, 'use_apollo': False, 'apollo_target': ['all'], 'apollo_rank': 16, 'apollo_update_interval': 200, 'apollo_scale': 32.0, 'apollo_proj': 'random', 'apollo_proj_type': 'std', 'apollo_scale_type': 'channel', 'apollo_layerwise': False, 'apollo_scale_front': False, 'use_badam': False, 'badam_mode': 'layer', 'badam_start_block': None, 'badam_switch_mode': 'ascending', 'badam_switch_interval': 50, 'badam_update_ratio': 0.05, 'badam_mask_mode': 'adjacent', 'badam_verbose': 0, 'use_swanlab': False, 'swanlab_project': 'llamafactory', 'swanlab_workspace': None, 'swanlab_run_name': None, 'swanlab_mode': 'cloud', 'swanlab_api_key': '<SWANLAB_API_KEY>', 'swanlab_logdir': None, 'swanlab_lark_webhook_url': None, 'swanlab_lark_secret': None, 'pure_bf16': False, 'stage': 'pt', 'finetuning_type': 'lora', 'use_llama_pro': False, 'use_adam_mini': False, 'use_mca': False, 'use_muon': False, 'use_dft_loss': False, 'use_eaft_loss': False, 'eaft_alpha': 1.0, 'freeze_vision_tower': True, 'freeze_multi_modal_projector': True, 'freeze_language_model': False, 'compute_accuracy': False, 'disable_shuffling': False, 'early_stopping_steps': None, 'plot_loss': True, 'include_effective_tokens_per_second': False}, 'generating_args': {'do_sample': True, 'temperature': 0.95, 'top_p': 0.7, 'top_k': 50, 'num_beams': 1, 'max_new_tokens': 1024, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'skip_special_tokens': True}}
2026-02-12 10:15:26,287 INFO wandb-AsyncioManager-main:874 [service_client.py:_forward_responses():94] Reached EOF.
2026-02-12 10:15:26,288 INFO wandb-AsyncioManager-main:874 [mailbox.py:close():154] Closing mailbox, abandoning 1 handles.
2026-02-12 10:15:26,797 ERROR wandb-AsyncioManager-main:874 [asyncio_manager.py:fn_wrap_exceptions():183] Uncaught exception in run_soon callback.
Traceback (most recent call last):
  File "/usr/local/lib/python3.11/dist-packages/wandb/sdk/lib/asyncio_manager.py", line 181, in fn_wrap_exceptions
    await fn()
  File "/usr/local/lib/python3.11/dist-packages/wandb/sdk/lib/service/service_client.py", line 38, in publish
    await self._send_server_request(request)
  File "/usr/local/lib/python3.11/dist-packages/wandb/sdk/lib/service/service_client.py", line 64, in _send_server_request
    await self._writer.drain()
  File "/usr/lib/python3.11/asyncio/streams.py", line 392, in drain
    await self._protocol._drain_helper()
  File "/usr/lib/python3.11/asyncio/streams.py", line 166, in _drain_helper
    raise ConnectionResetError('Connection lost')
ConnectionResetError: Connection lost
|
| 350 |
+
ConnectionResetError: Connection lost
|
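Note on the log excerpt above: the repeated ConnectionResetError tracebacks indicate that the wandb client's asyncio writer lost its socket to the local wandb service process while it was still trying to flush queued records, so every retry hit the same closed connection. As a purely illustrative workaround (not something this run used), a run can be logged offline and uploaded afterwards with the standard `wandb sync` command; a minimal sketch, assuming the stock wandb SDK:

import wandb

# Minimal sketch, illustrative only: log offline so a dropped service
# connection cannot lose records, then upload later with `wandb sync <run-dir>`.
run = wandb.init(project="llamafactory", mode="offline")
for step in range(10):
    run.log({"train/loss": 1.0 / (step + 1)}, step=step)
run.finish()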
LlamaFactory/wandb/run-20260211_035539-7vgn4sn5/files/config.yaml
ADDED
|
@@ -0,0 +1,723 @@
_name_or_path:
  value: /workspace/Qwen/Qwen3-8B-Base
_wandb:
  value:
    cli_version: 0.24.2
    e:
      d932fckwajkbgds6xb39j0rfceozuncf:
        args:
          - /workspace/v127rc_exp2/B_mup.yaml
        cpu_count: 24
        cpu_count_logical: 48
        cudaVersion: "12.8"
        disk:
          /:
            total: "21474836480"
            used: "2406432768"
        email: markmochi200@gmail.com
        executable: /usr/bin/python
        git:
          commit: 1a02717fa84c270d1c156c4c4a391c2f95525a63
          remote: https://github.com/hiyouga/LlamaFactory.git
        gpu: NVIDIA GeForce RTX 4090
        gpu_count: 1
        gpu_nvidia:
          - architecture: Ada
            cudaCores: 16384
            memoryTotal: "25757220864"
            name: NVIDIA GeForce RTX 4090
            uuid: GPU-d934067c-f87d-c530-801b-ffb9d79da646
        host: 4760e22a4db4
        memory:
          total: "270098481152"
        os: Linux-6.8.0-79-generic-x86_64-with-glibc2.35
        program: /usr/local/bin/llamafactory-cli
        python: CPython 3.11.10
        root: /workspace/LlamaFactory
        startedAt: "2026-02-11T03:55:39.815654Z"
        writerId: d932fckwajkbgds6xb39j0rfceozuncf
    m:
      - "1": train/global_step
        "6":
          - 3
        "7": []
      - "2": '*'
        "5": 1
        "6":
          - 1
        "7": []
    python_version: 3.11.10
    t:
      "1":
        - 1
        - 11
        - 41
        - 49
        - 51
        - 71
        - 84
        - 98
        - 105
      "2":
        - 1
        - 11
        - 41
        - 49
        - 51
        - 71
        - 84
        - 98
        - 105
      "3":
        - 7
        - 19
        - 62
        - 66
      "4": 3.11.10
      "5": 0.24.2
      "6": 5.0.0
      "9":
        "1": transformers_trainer
      "12": 0.24.2
      "13": linux-x86_64
accelerator_config:
  value:
    dispatch_batches: null
    even_batches: true
    gradient_accumulation_kwargs: null
    non_blocking: false
    split_batches: false
    use_seedable_sampler: true
adam_beta1:
  value: 0.9
adam_beta2:
  value: 0.95
adam_epsilon:
  value: 1e-08
architectures:
  value:
    - Qwen3ForCausalLM
attention_bias:
  value: false
attention_dropout:
  value: 0
auto_find_batch_size:
  value: false
average_tokens_across_devices:
  value: true
batch_eval_metrics:
  value: false
bf16:
  value: true
bf16_full_eval:
  value: false
bos_token_id:
  value: null
chunk_size_feed_forward:
  value: 0
data_args:
  value:
    buffer_size: 16384
    cutoff_len: 2047
    data_shared_file_system: false
    dataset:
      - Markie_Voss_t34_d300_r0
    dataset_dir: /workspace/LlamaFactory/data
    default_system: null
    enable_thinking: false
    eval_dataset: null
    eval_num_beams: null
    eval_on_each_dataset: false
    ignore_pad_token_for_loss: true
    interleave_probs: null
    mask_history: false
    max_samples: 100000000
    media_dir: /workspace/LlamaFactory/data
    mix_strategy: concat
    neat_packing: false
    overwrite_cache: false
    packing: true
    preprocessing_batch_size: 1000
    preprocessing_num_workers: 16
    streaming: false
    template: qwen3_nothink
    tokenized_path: null
    tool_format: null
    train_on_prompt: false
    val_size: 0
data_seed:
  value: null
dataloader_drop_last:
  value: false
dataloader_num_workers:
  value: 0
dataloader_persistent_workers:
  value: false
dataloader_pin_memory:
  value: true
dataloader_prefetch_factor:
  value: null
ddp_backend:
  value: null
ddp_broadcast_buffers:
  value: null
ddp_bucket_cap_mb:
  value: null
ddp_find_unused_parameters:
  value: null
ddp_timeout:
  value: 180000000
debug:
  value: []
deepspeed:
  value: null
disable_tqdm:
  value: false
do_eval:
  value: false
do_predict:
  value: false
do_train:
  value: true
dtype:
  value: bfloat16
enable_jit_checkpoint:
  value: false
eos_token_id:
  value: 151645
eval_accumulation_steps:
  value: null
eval_delay:
  value: 0
eval_do_concat_batches:
  value: true
eval_on_start:
  value: false
eval_steps:
  value: null
eval_strategy:
  value: "no"
eval_use_gather_object:
  value: false
finetuning_args:
  value:
    additional_target: null
    apollo_layerwise: false
    apollo_proj: random
    apollo_proj_type: std
    apollo_rank: 16
    apollo_scale: 32
    apollo_scale_front: false
    apollo_scale_type: channel
    apollo_target:
      - all
    apollo_update_interval: 200
    badam_mask_mode: adjacent
    badam_mode: layer
    badam_start_block: null
    badam_switch_interval: 50
    badam_switch_mode: ascending
    badam_update_ratio: 0.05
    badam_verbose: 0
    compute_accuracy: false
    create_new_adapter: false
    disable_shuffling: false
    dpo_label_smoothing: 0
    eaft_alpha: 1
    early_stopping_steps: null
    finetuning_type: lora
    freeze_extra_modules: null
    freeze_language_model: false
    freeze_multi_modal_projector: true
    freeze_trainable_layers: 2
    freeze_trainable_modules:
      - all
    freeze_vision_tower: true
    galore_layerwise: false
    galore_proj_type: std
    galore_rank: 16
    galore_scale: 2
    galore_target:
      - all
    galore_update_interval: 200
    include_effective_tokens_per_second: false
    kto_chosen_weight: 1
    kto_rejected_weight: 1
    ld_alpha: null
    lora_alpha: 64
    lora_dropout: 0.03
    lora_rank: 32
    lora_target:
      - all
    loraplus_lr_embedding: 1e-06
    loraplus_lr_ratio: null
    module_dropout: 0
    oft_block_size: 32
    oft_rank: 0
    oft_target:
      - all
    pissa_convert: false
    pissa_init: false
    pissa_iter: 16
    plot_loss: true
    ppo_buffer_size: 1
    ppo_epochs: 4
    ppo_score_norm: false
    ppo_target: 6
    ppo_whiten_rewards: false
    pref_bco_weight: 0
    pref_beta: 0.1
    pref_ftx: 0
    pref_loss: sigmoid
    pure_bf16: false
    ref_model: null
    ref_model_adapters: null
    ref_model_quantization_bit: null
    reward_model: null
    reward_model_adapters: null
    reward_model_quantization_bit: null
    reward_model_type: lora
    simpo_gamma: 0.5
    stage: pt
    swanlab_api_key: <SWANLAB_API_KEY>
    swanlab_lark_secret: null
    swanlab_lark_webhook_url: null
    swanlab_logdir: null
    swanlab_mode: cloud
    swanlab_project: llamafactory
    swanlab_run_name: null
    swanlab_workspace: null
    use_adam_mini: false
    use_apollo: false
    use_badam: false
    use_dft_loss: false
    use_dora: false
    use_eaft_loss: false
    use_galore: false
    use_llama_pro: false
    use_mca: false
    use_muon: false
    use_rslora: false
    use_swanlab: false
fp8:
  value: false
fp8_backend:
  value: auto
fp8_enable_fsdp_float8_all_gather:
  value: false
fp16:
  value: false
fp16_full_eval:
  value: false
fsdp:
  value: []
fsdp_config:
  value:
    min_num_params: 0
    xla: false
    xla_fsdp_grad_ckpt: false
    xla_fsdp_v2: false
full_determinism:
  value: false
generating_args:
  value:
    do_sample: true
    length_penalty: 1
    max_new_tokens: 1024
    num_beams: 1
    repetition_penalty: 1
    skip_special_tokens: true
    temperature: 0.95
    top_k: 50
    top_p: 0.7
generation_config:
  value: null
generation_max_length:
  value: 2047
generation_num_beams:
  value: null
gradient_accumulation_steps:
  value: 8
gradient_checkpointing:
  value: false
gradient_checkpointing_kwargs:
  value: null
greater_is_better:
  value: null
group_by_length:
  value: false
head_dim:
  value: 128
hidden_act:
  value: silu
hidden_size:
  value: 4096
hub_always_push:
  value: false
hub_model_id:
  value: null
hub_private_repo:
  value: null
hub_revision:
  value: null
hub_strategy:
  value: every_save
hub_token:
  value: <HUB_TOKEN>
id2label:
  value:
    "0": LABEL_0
    "1": LABEL_1
ignore_data_skip:
  value: false
include_for_metrics:
  value: []
include_num_input_tokens_seen:
  value: all
initializer_range:
  value: 0.02
intermediate_size:
  value: 12288
is_encoder_decoder:
  value: false
label_names:
  value:
    - labels
label_smoothing_factor:
  value: 0
label2id:
  value:
    LABEL_0: 0
    LABEL_1: 1
layer_types:
  value:
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
    - full_attention
learning_rate:
  value: 0.0001
length_column_name:
  value: length
liger_kernel_config:
  value: null
load_best_model_at_end:
  value: false
local_rank:
  value: -1
log_level:
  value: passive
log_level_replica:
  value: warning
log_on_each_node:
  value: true
logging_dir:
  value: null
logging_first_step:
  value: false
logging_nan_inf_filter:
  value: true
logging_steps:
  value: 1
logging_strategy:
  value: steps
lr_scheduler_kwargs:
  value: null
lr_scheduler_type:
  value: cosine
master_addr:
  value: null
master_port:
  value: null
max_grad_norm:
  value: 1
max_position_embeddings:
  value: 32768
max_steps:
  value: -1
max_window_layers:
  value: 36
metric_for_best_model:
  value: null
model/num_parameters:
  value: 8278029312
model_args:
  value:
    adapter_folder: null
    adapter_name_or_path: null
    add_special_tokens: null
    add_tokens: null
    audio_sampling_rate: 16000
    block_diag_attn: false
    cache_dir: null
    chunk_size: 8192
    compute_dtype: torch.bfloat16
    cpu_infer: 32
    crop_to_patches: false
    device_map:
      "": cuda:0
    disable_gradient_checkpointing: false
    double_quantization: true
    enable_liger_kernel: false
    export_device: cpu
    export_dir: null
    export_hub_model_id: null
    export_legacy_format: false
    export_quantization_bit: null
    export_quantization_dataset: null
    export_quantization_maxlen: 1024
    export_quantization_nsamples: 128
    export_size: 5
    flash_attn: auto
    hf_hub_token: <HF_HUB_TOKEN>
    image_do_pan_and_scan: false
    image_max_pixels: 589824
    image_min_pixels: 1024
    infer_backend: HF
    infer_dtype: auto
    init_special_tokens: noise_init
    kt_force_think: false
    kt_maxlen: 4096
    kt_mode: normal
    kt_optimize_rule: null
    kt_use_cuda_graph: true
    low_cpu_mem_usage: true
    mixture_of_depths: null
    mode: normal
    model_max_length: 2047
    model_name_or_path: /workspace/Qwen/Qwen3-8B-Base
    model_revision: main
    moe_aux_loss_coef: null
    ms_hub_token: <MS_HUB_TOKEN>
    new_special_tokens_config: null
    offload_folder: offload
    om_hub_token: <OM_HUB_TOKEN>
    print_param_status: false
    quantization_bit: null
    quantization_device_map: null
    quantization_method: BNB
    quantization_type: nf4
    resize_vocab: false
    rope_scaling: null
    sglang_config: null
    sglang_lora_backend: triton
    sglang_maxlen: 4096
    sglang_mem_fraction: 0.7
    sglang_tp_size: -1
    shift_attn: false
    split_special_tokens: false
    train_from_scratch: false
    trust_remote_code: true
    upcast_layernorm: false
    upcast_lmhead_output: false
    use_audio_in_video: false
    use_fast_tokenizer: true
    use_kt: false
    use_kv_cache: true
    use_reentrant_gc: true
    use_unsloth: false
    use_unsloth_gc: false
    use_v1_kernels: false
    video_fps: 2
    video_max_pixels: 65536
    video_maxlen: 128
    video_min_pixels: 256
    vllm_config: null
    vllm_enforce_eager: false
    vllm_gpu_util: 0.7
    vllm_max_lora_rank: 32
    vllm_maxlen: 4096
model_type:
  value: qwen3
neftune_noise_alpha:
  value: null
num_attention_heads:
  value: 32
num_hidden_layers:
  value: 36
num_key_value_heads:
  value: 8
num_train_epochs:
  value: 10
optim:
  value: adamw_torch
optim_args:
  value: null
optim_target_modules:
  value: null
output_attentions:
  value: false
output_dir:
  value: /workspace/v127rc_exp2/B_mup
output_hidden_states:
  value: false
overwrite_output_dir:
  value: false
pad_token_id:
  value: 151643
parallelism_config:
  value: null
peft_config:
  value:
    default:
      alora_invocation_tokens: null
      arrow_config: null
      auto_mapping: null
      base_model_name_or_path: /workspace/Qwen/Qwen3-8B-Base
      bias: none
      corda_config: null
      ensure_weight_tying: false
      eva_config: null
      exclude_modules: null
      fan_in_fan_out: false
      inference_mode: false
      init_lora_weights: true
      layer_replication: null
      layers_pattern: null
      layers_to_transform: null
      lora_alpha: 64
      lora_bias: false
      lora_dropout: 0.03
      megatron_config: null
      megatron_core: megatron.core
      modules_to_save: null
      peft_type: LORA
      peft_version: 0.18.1
      qalora_group_size: 16
      r: 32
      revision: null
      runtime_config:
        ephemeral_gpu_offload: false
      target_modules:
        - q_proj
        - v_proj
        - down_proj
        - up_proj
        - gate_proj
        - k_proj
        - o_proj
      target_parameters: null
      task_type: CAUSAL_LM
      trainable_token_indices: null
      use_dora: false
      use_qalora: false
      use_rslora: false
per_device_eval_batch_size:
  value: 8
per_device_train_batch_size:
  value: 1
predict_with_generate:
  value: false
prediction_loss_only:
  value: false
problem_type:
  value: null
project:
  value: huggingface
push_to_hub:
  value: false
ray_init_kwargs:
  value: null
ray_num_workers:
  value: 1
remove_unused_columns:
  value: false
report_to:
  value:
    - wandb
restore_callback_states_from_checkpoint:
  value: false
resume_from_checkpoint:
  value: null
return_dict:
  value: true
rms_norm_eps:
  value: 1e-06
rope_parameters:
  value:
    rope_theta: 1000000
    rope_type: default
run_name:
  value: null
save_on_each_node:
  value: false
save_only_model:
  value: true
save_steps:
  value: 100
save_strategy:
  value: steps
save_total_limit:
  value: null
seed:
  value: 42
skip_memory_metrics:
  value: true
sliding_window:
  value: null
sortish_sampler:
  value: false
tf32:
  value: null
tie_word_embeddings:
  value: false
torch_compile:
  value: false
torch_compile_backend:
  value: null
torch_compile_mode:
  value: null
torch_empty_cache_steps:
  value: null
trackio_space_id:
  value: trackio
transformers_version:
  value: 5.0.0
use_cache:
  value: false
use_cpu:
  value: false
use_liger_kernel:
  value: false
use_sliding_window:
  value: false
vocab_size:
  value: 151936
warmup_ratio:
  value: 0.01
warmup_steps:
  value: 0.01
weight_decay:
  value: 0.01
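Note on the config above: it records a LoRA run (stage: pt, finetuning_type: lora, rank 32, alpha 64, dropout 0.03, learning rate 1e-4, 10 planned epochs, cutoff_len 2047, packing enabled) on Qwen3-8B-Base, launched as llamafactory-cli train /workspace/v127rc_exp2/B_mup.yaml on a single RTX 4090. That launch file itself is not part of this upload, so the following is only a rough sketch reconstructed from the logged values; the key names follow LlamaFactory's flat YAML convention and any option not shown here is an assumption.

# Hypothetical reconstruction of B_mup.yaml from the wandb config; not the original file.
model_name_or_path: /workspace/Qwen/Qwen3-8B-Base
stage: pt
do_train: true
finetuning_type: lora
lora_rank: 32
lora_alpha: 64
lora_dropout: 0.03
lora_target: all
dataset: Markie_Voss_t34_d300_r0
template: qwen3_nothink
cutoff_len: 2047
packing: true
output_dir: /workspace/v127rc_exp2/B_mup
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 10
lr_scheduler_type: cosine
warmup_ratio: 0.01
weight_decay: 0.01
bf16: true
logging_steps: 1
save_steps: 100
save_only_model: true
report_to: wandb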
LlamaFactory/wandb/run-20260211_035539-7vgn4sn5/files/output.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|
LlamaFactory/wandb/run-20260211_035539-7vgn4sn5/files/wandb-metadata.json
ADDED
|
@@ -0,0 +1,41 @@
{
  "os": "Linux-6.8.0-79-generic-x86_64-with-glibc2.35",
  "python": "CPython 3.11.10",
  "startedAt": "2026-02-11T03:55:39.815654Z",
  "args": [
    "/workspace/v127rc_exp2/B_mup.yaml"
  ],
  "program": "/usr/local/bin/llamafactory-cli",
  "git": {
    "remote": "https://github.com/hiyouga/LlamaFactory.git",
    "commit": "1a02717fa84c270d1c156c4c4a391c2f95525a63"
  },
  "email": "markmochi200@gmail.com",
  "root": "/workspace/LlamaFactory",
  "host": "4760e22a4db4",
  "executable": "/usr/bin/python",
  "cpu_count": 24,
  "cpu_count_logical": 48,
  "gpu": "NVIDIA GeForce RTX 4090",
  "gpu_count": 1,
  "disk": {
    "/": {
      "total": "21474836480",
      "used": "2406432768"
    }
  },
  "memory": {
    "total": "270098481152"
  },
  "gpu_nvidia": [
    {
      "name": "NVIDIA GeForce RTX 4090",
      "memoryTotal": "25757220864",
      "cudaCores": 16384,
      "architecture": "Ada",
      "uuid": "GPU-d934067c-f87d-c530-801b-ffb9d79da646"
    }
  ],
  "cudaVersion": "12.8",
  "writerId": "d932fckwajkbgds6xb39j0rfceozuncf"
}
LlamaFactory/wandb/run-20260211_035539-7vgn4sn5/files/wandb-summary.json
ADDED
|
@@ -0,0 +1 @@
{"_runtime":109185,"train/epoch":3.665927977839335,"train_runtime":109179.797,"train/loss":0.006374494172632694,"_wandb":{"runtime":109185},"_timestamp":1.7708913191638207e+09,"train/learning_rate":7.126525942172399e-05,"train/grad_norm":0.027281571179628372,"_step":13233,"train/num_input_tokens_seen":216719984,"train/train_tokens_per_second":1984.982,"train/global_step":13234}
|
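For reference, the summary above records 13,234 optimizer steps (epoch ~3.67 of 10) over roughly 109,180 seconds, about 30 hours, with about 216.7 million input tokens seen at roughly 1,985 tokens per second. A small illustrative Python snippet (not part of the upload) for reading these fields back out of wandb-summary.json:

import json

# Illustrative only: read the run summary that wandb wrote for this run.
path = "LlamaFactory/wandb/run-20260211_035539-7vgn4sn5/files/wandb-summary.json"
with open(path) as f:
    summary = json.load(f)

hours = summary["train_runtime"] / 3600          # ~30.3 hours
tokens = summary["train/num_input_tokens_seen"]  # ~216.7M tokens
print(f"loss={summary['train/loss']:.4f}, {hours:.1f} h, {tokens:,} tokens")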