Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .gitattributes +92 -0
- LlamaFactory/.github/CODE_OF_CONDUCT.md +128 -0
- LlamaFactory/.github/CONTRIBUTING.md +67 -0
- LlamaFactory/.github/PULL_REQUEST_TEMPLATE.md +8 -0
- LlamaFactory/.github/SECURITY.md +7 -0
- LlamaFactory/.github/copilot-instructions.md +180 -0
- LlamaFactory/.github/instructions-v0.md +0 -0
- LlamaFactory/.github/instructions-v1.md +0 -0
- LlamaFactory/.github/workflows/label_issue.yml +32 -0
- LlamaFactory/.github/workflows/tests.yml +106 -0
- LlamaFactory/.github/workflows/tests_cuda.yml +79 -0
- LlamaFactory/.github/workflows/tests_npu.yml +87 -0
- LlamaFactory/assets/logo.png +0 -0
- LlamaFactory/assets/sponsors/serpapi.svg +1 -0
- LlamaFactory/assets/thirdparty/colab.svg +1 -0
- LlamaFactory/assets/thirdparty/discord.svg +1 -0
- LlamaFactory/assets/thirdparty/dsw.svg +92 -0
- LlamaFactory/assets/thirdparty/lab4ai.svg +536 -0
- LlamaFactory/assets/thirdparty/online.svg +789 -0
- LlamaFactory/data/.ipynb_checkpoints/dataset_info-checkpoint.json +826 -0
- LlamaFactory/data/README.md +475 -0
- LlamaFactory/data/README_zh.md +478 -0
- LlamaFactory/data/alpaca_en_demo.json +0 -0
- LlamaFactory/data/alpaca_zh_demo.json +0 -0
- LlamaFactory/data/c4_demo.jsonl +0 -0
- LlamaFactory/data/dataset_info.json +826 -0
- LlamaFactory/data/dpo_en_demo.json +0 -0
- LlamaFactory/data/dpo_zh_demo.json +0 -0
- LlamaFactory/data/glaive_toolcall_en_demo.json +0 -0
- LlamaFactory/data/glaive_toolcall_zh_demo.json +0 -0
- LlamaFactory/data/identity.json +457 -0
- LlamaFactory/data/kto_en_demo.json +0 -0
- LlamaFactory/data/mllm_audio_demo.json +47 -0
- LlamaFactory/data/mllm_demo.json +142 -0
- LlamaFactory/data/mllm_demo_data/1.jpg +0 -0
- LlamaFactory/data/mllm_demo_data/2.jpg +0 -0
- LlamaFactory/data/mllm_demo_data/2.wav +0 -0
- LlamaFactory/data/mllm_demo_data/3.jpg +0 -0
- LlamaFactory/data/mllm_demo_data/4.mp3 +0 -0
- LlamaFactory/data/mllm_demo_data/4.mp4 +0 -0
- LlamaFactory/data/mllm_video_audio_demo.json +56 -0
- LlamaFactory/data/mllm_video_demo.json +47 -0
- LlamaFactory/data/reason_tool_use_demo_50.jsonl +0 -0
- LlamaFactory/data/v1_dpo_demo.jsonl +10 -0
- LlamaFactory/data/v1_dpo_demo.yaml +4 -0
- LlamaFactory/data/v1_sft_demo.jsonl +0 -0
- LlamaFactory/data/v1_sft_demo.yaml +9 -0
- LlamaFactory/data/wiki_demo.txt +0 -0
- LlamaFactory/docker/docker-cuda/Dockerfile +62 -0
- LlamaFactory/docker/docker-cuda/Dockerfile.base +55 -0
.gitattributes
CHANGED
@@ -33,3 +33,95 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/Markie_Voss_ABQA_eval_results.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-13200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-13100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-13000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-12700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-12900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-12800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-12600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-12500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-12300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-12400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-12100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-12200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-12000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-11900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-11800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-11700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-11600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-11500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-11400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-11300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-11200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-11100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-11000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-10900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-10800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-10700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-10600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-10500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-10400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-10300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-10200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-10100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-10000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-9900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-9800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-9700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-9600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-9500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-9400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-9300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-9200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-9100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-9000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-8900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-8800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-8700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-8600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-8500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-8400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-8300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-8200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-8100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-8000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-7900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-7800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-7700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-7600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-7500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-7400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-7300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-7200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-7100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-7000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-6900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-6800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-6700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-6600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-6500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-6400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-6300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-6200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-6100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-6000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-5900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-5800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-5700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-5600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-5500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-5400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-5300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-5200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-5100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-5000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-4900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-4800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-4700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-4600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-4500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-4400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-4300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v127rc_exp2/B_mup/checkpoint-4200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
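All of the added rules use the standard Git LFS attribute form (`filter=lfs diff=lfs merge=lfs -text`), one rule per tracked file. As a minimal sketch of how equivalent entries are usually produced by hand (assuming the `git-lfs` CLI is installed; the wildcard pattern is an illustration, not what the upload tool actually wrote):

```bash
# Equivalent .gitattributes entries via the git-lfs CLI (illustrative only).
git lfs install                      # one-time setup in the repository
git lfs track "v127rc_exp2/B_mup/Markie_Voss_ABQA_eval_results.json"
git lfs track "v127rc_exp2/B_mup/checkpoint-*/tokenizer.json"   # one wildcard instead of 91 per-file rules
git add .gitattributes
```

The upload tool emits one rule per file, which is equivalent to the single wildcard above, just more verbose.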
LlamaFactory/.github/CODE_OF_CONDUCT.md
ADDED
@@ -0,0 +1,128 @@

# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.

Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at `hoshihiyouga AT gmail DOT com`. All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of actions.

**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.
LlamaFactory/.github/CONTRIBUTING.md
ADDED
@@ -0,0 +1,67 @@

# Contributing to LLaMA Factory

Everyone is welcome to contribute, and we value everybody's contribution. Code contributions are not the only way to help the community. Answering questions, helping others, and improving the documentation are also immensely valuable.

It also helps us if you spread the word! Reference the library in blog posts about the awesome projects it made possible, shout out on Twitter every time it has helped you, or simply ⭐️ the repository to say thank you.

However you choose to contribute, please be mindful and respect our [code of conduct](CODE_OF_CONDUCT.md).

**This guide was heavily inspired by [transformers guide to contributing](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md).**

## Ways to contribute

There are several ways you can contribute to LLaMA Factory:

* Fix outstanding issues with the existing code.
* Submit issues related to bugs or desired new features.
* Contribute to the examples or to the documentation.

### Style guide

LLaMA Factory follows the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html), check it for details.

### Create a Pull Request

1. Fork the [repository](https://github.com/hiyouga/LLaMA-Factory) by clicking on the [Fork](https://github.com/hiyouga/LLaMA-Factory/fork) button on the repository's page. This creates a copy of the code under your GitHub user account.

2. Clone your fork to your local disk, and add the base repository as a remote:

   ```bash
   git clone git@github.com:[username]/LLaMA-Factory.git
   cd LLaMA-Factory
   git remote add upstream https://github.com/hiyouga/LLaMA-Factory.git
   ```

3. Create a new branch to hold your development changes:

   ```bash
   git checkout -b dev_your_branch
   ```

4. Set up a development environment by running the following command in a virtual environment:

   ```bash
   pip install -e ".[dev]"
   ```

   If LLaMA Factory was already installed in the virtual environment, remove it with `pip uninstall llamafactory` before reinstalling it in editable mode with the `-e` flag.

5. Check code before commit:

   ```bash
   make commit
   make style && make quality
   make test
   ```

6. Submit changes:

   ```bash
   git add .
   git commit -m "commit message"
   git fetch upstream
   git rebase upstream/main
   git push -u origin dev_your_branch
   ```

7. Create a merge request from your branch `dev_your_branch` at [origin repo](https://github.com/hiyouga/LLaMA-Factory).
LlamaFactory/.github/PULL_REQUEST_TEMPLATE.md
ADDED
@@ -0,0 +1,8 @@

# What does this PR do?

Fixes # (issue)

## Before submitting

- [ ] Did you read the [contributor guideline](https://github.com/hiyouga/LLaMA-Factory/blob/main/.github/CONTRIBUTING.md)?
- [ ] Did you write any new necessary tests?
LlamaFactory/.github/SECURITY.md
ADDED
@@ -0,0 +1,7 @@

# Reporting Security Issues

To report a security issue, please use the GitHub Security Advisory ["Report a Vulnerability"](https://github.com/hiyouga/LLaMA-Factory/security/advisories/new) tab.

We will send a response indicating the next steps in handling your report. After the initial reply to your report, the security team will keep you informed of the progress towards a fix and full announcement, and may ask for additional information or guidance.

Report security bugs in third-party modules to the person or team maintaining the module.
LlamaFactory/.github/copilot-instructions.md
ADDED
@@ -0,0 +1,180 @@

# GitHub Copilot Instructions for LLaMA Factory

## Project Overview

LLaMA Factory is an efficient fine-tuning framework for 100+ large language models (LLMs). It provides:
- Support for various models: LLaMA, LLaVA, Mistral, Qwen, DeepSeek, Yi, Gemma, ChatGLM, Phi, etc.
- Multiple training methods: pre-training, supervised fine-tuning, reward modeling, PPO, DPO, KTO, ORPO
- Scalable resources: 16-bit full-tuning, freeze-tuning, LoRA and QLoRA variants
- Advanced algorithms: GaLore, BAdam, APOLLO, Adam-mini, Muon, OFT, DoRA, etc.
- Web UI (LLaMA Board) and CLI interfaces

### Architecture Versions

LLaMA Factory has two parallel architectures that can be switched via the `USE_V1` environment variable:

**v0 (default)** - File hierarchy:
- `api`, `webui` → `chat`, `eval`, `train` → `data`, `model` → `hparams` → `extras`

**v1** - File hierarchy:
- `trainers` → `core` → `accelerator`, `plugins`, `config` → `utils`

Set `USE_V1=1` to enable v1 architecture.

## Code Structure

### v0 Architecture (Default)

- `src/llamafactory/` - Main package directory
  - `api/` - OpenAI-style API implementation
  - `chat/` - Chat interface implementation
  - `cli.py` - Command-line interface
  - `data/` - Data processing and dataset handling
  - `eval/` - Model evaluation utilities
  - `extras/` - Additional utilities and helpers
  - `hparams/` - Hyperparameter definitions
  - `model/` - Model loading, patching, and utilities
  - `train/` - Training pipeline implementation
  - `webui/` - Gradio-based web interface
- `src/train.py` - Training entry script (delegates to `llamafactory.train.tuner`)
- `src/webui.py` - Web UI entry script (delegates to `llamafactory.webui.interface`)
- `src/api.py` - API server entry script (delegates to `llamafactory.api.app`)
- `tests/` - Test suite
- `examples/` - Example configurations for various training scenarios
- `data/` - Dataset definitions and examples

### v1 Architecture (USE_V1=1)

- `src/llamafactory/v1/` - Version 1 package directory
  - `trainers/` - Training implementations
  - `core/` - Core training utilities
  - `accelerator/` - Acceleration and distributed training
  - `plugins/` - Pluggable components (model, data, sampler, trainer)
  - `config/` - Configuration management
  - `utils/` - Utility functions

## Development Practices

### Code Style

- Follow the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html)
- Use ruff for linting and formatting
- Line length: 119 characters
- Indentation: 4 spaces
- Quote style: double quotes
- Use Google-style docstrings for documentation

### Import Organization

- Known first-party: `llamafactory`
- Known third-party: `accelerate`, `datasets`, `gradio`, `numpy`, `peft`, `torch`, `transformers`, `trl`
- Use 2 blank lines after imports

### Quality Checks

Before committing code, run:
```bash
make style    # Auto-fix style issues
make quality  # Check code quality
make test     # Run test suite
```

Or use the combined command:
```bash
make commit   # Run pre-commit hooks
```

### Testing

- Use pytest for testing
- Tests are located in `tests/` and `tests_v1/` directories
- Run tests with: `make test` (which runs `WANDB_DISABLED=true pytest -vv --import-mode=importlib tests/ tests_v1/`)
- Disable wandb during testing to avoid external dependencies
- **Note**: Training configurations require GPU machines, so training is typically not tested end-to-end. Use `make test` to validate file-level functionality.

### Building

Build the package with:
```bash
pip3 install build && python3 -m build
```

### License

- All source files must include the Apache 2.0 license header
- Check license headers with: `make license`

## Common Patterns

### Configuration Files

- Training configurations are typically YAML or JSON files in `examples/` directory
- Hyperparameters are defined using dataclasses in `src/llamafactory/hparams/`

### Model Support

- New model support is added through model patches in `src/llamafactory/model/`
- Visual models use the visual utilities in `src/llamafactory/model/model_utils/visual.py`
- Quantization support is in `src/llamafactory/model/model_utils/quantization.py`

### Data Processing

- Dataset definitions are in `data/dataset_info.json`
- Data templates and processors are in `src/llamafactory/data/`

### Training

- Training pipelines are in `src/llamafactory/train/`
- Support for different training methods: SFT, DPO, PPO, RM, PT, KTO, ORPO

## Key Dependencies

- Python >= 3.9.0
- PyTorch and transformers for model handling
- datasets for data processing
- peft for parameter-efficient fine-tuning
- accelerate for distributed training
- gradio for web UI
- trl for reinforcement learning
- Optional: vllm/sglang for inference, flash-attention-2, unsloth, liger-kernel

## Entry Points

- **CLI Training**: `llamafactory-cli train --config examples/train_lora/llama3_lora_sft.yaml`
- **Web UI**: `llamafactory-cli webui` or `python src/webui.py`
- **API Server**: `llamafactory-cli api` or `python src/api.py`
- **Chat Interface**: `llamafactory-cli chat --model_name_or_path MODEL_PATH`

## Environment Setup

For development:
```bash
pip install -e ".[dev]"
```

## Important Notes

- The project supports multiple backends: default PyTorch, vLLM, SGLang
- Megatron-core training is supported via mcore_adapter
- SwanLab and W&B are supported for experiment tracking
- Docker support is available with pre-built images
- Day-0/Day-1 support for latest cutting-edge models
- Multi-modal support for vision and audio understanding tasks

## Contribution Guidelines

1. Fork the repository
2. Create a development branch
3. Set up development environment with `pip install -e ".[dev]"`
4. Make changes following the style guide
5. Run quality checks: `make style && make quality`
6. Run tests: `make test`
7. Submit a pull request

## Common Commands

- `make style` - Format code
- `make quality` - Run linters
- `make test` - Run tests
- `make commit` - Install and run pre-commit hooks
- `make license` - Check license headers
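The commands referenced throughout these instructions compose into a short local loop. A minimal sketch (it only reuses commands and paths quoted in the file above; nothing here is additional project tooling):

```bash
# Sketch of a local dev loop using only commands quoted in copilot-instructions.md.
pip install -e ".[dev]"                 # development install

make style && make quality              # ruff formatting and lint checks
make license                            # Apache 2.0 header check
make test                               # pytest over tests/ and tests_v1/

# v0 (default) architecture
llamafactory-cli train --config examples/train_lora/llama3_lora_sft.yaml

# v1 architecture, toggled via the environment variable
USE_V1=1 llamafactory-cli train --config examples/train_lora/llama3_lora_sft.yaml
```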
LlamaFactory/.github/instructions-v0.md
ADDED
File without changes

LlamaFactory/.github/instructions-v1.md
ADDED
File without changes
LlamaFactory/.github/workflows/label_issue.yml
ADDED
@@ -0,0 +1,32 @@

name: label_issue

on:
  issues:
    types:
      - opened

jobs:
  label_issue:
    runs-on: ubuntu-latest

    permissions:
      issues: write

    steps:
      - env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          ISSUE_URL: ${{ github.event.issue.html_url }}
          ISSUE_TITLE: ${{ github.event.issue.title }}
        run: |
          LABEL=""
          NPU_KEYWORDS=(npu huawei ascend 华为 昇腾 910)
          ISSUE_TITLE_LOWER=$(echo $ISSUE_TITLE | tr '[:upper:]' '[:lower:]')
          for KEYWORD in ${NPU_KEYWORDS[@]}; do
            if [[ $ISSUE_TITLE_LOWER == *$KEYWORD* ]] && [[ $ISSUE_TITLE_LOWER != *input* ]]; then
              LABEL="npu"
              break
            fi
          done
          if [ -n "$LABEL" ]; then
            gh issue edit $ISSUE_URL --add-label $LABEL
          fi
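Since the labeling step is plain bash, the keyword rule can be exercised locally before editing the workflow. A small sketch (the helper function and the sample titles are hypothetical, added only to illustrate the matching rule):

```bash
# Hypothetical local harness for the keyword rule used in label_issue.yml.
check_title() {
  local title_lower
  title_lower=$(echo "$1" | tr '[:upper:]' '[:lower:]')
  for keyword in npu huawei ascend 华为 昇腾 910; do
    # same rule as the workflow: match a keyword, but skip titles containing "input"
    if [[ $title_lower == *$keyword* ]] && [[ $title_lower != *input* ]]; then
      echo "npu"
      return
    fi
  done
  echo "no label"
}

check_title "Training hangs on Ascend 910B"   # prints: npu
check_title "Wrong input shape on NPU"        # prints: no label (title contains "input")
```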
LlamaFactory/.github/workflows/tests.yml
ADDED
@@ -0,0 +1,106 @@

name: tests

on:
  workflow_dispatch:
  push:
    branches:
      - "main"
    paths:
      - "**/*.py"
      - "pyproject.toml"
      - "Makefile"
      - ".github/workflows/*.yml"
  pull_request:
    branches:
      - "main"
    paths:
      - "**/*.py"
      - "pyproject.toml"
      - "Makefile"
      - ".github/workflows/*.yml"

jobs:
  tests:
    strategy:
      fail-fast: false
      matrix:
        python:
          - "3.11"
          - "3.12"
          - "3.13"
        os:
          - "ubuntu-latest"
          - "windows-latest"
          - "macos-latest"
        transformers:
          - ""
        include: # test backward compatibility
          - python: "3.11"
            os: "ubuntu-latest"
            transformers: "4.51.0"
          - python: "3.11"
            os: "ubuntu-latest"
            transformers: "4.53.0"
          - python: "3.11"
            os: "ubuntu-latest"
            transformers: "4.55.0"

    runs-on: ${{ matrix.os }}

    concurrency:
      group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.os }}-${{ matrix.python }}-${{ matrix.transformers }}
      cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

    env:
      HF_TOKEN: ${{ secrets.HF_TOKEN }}
      OS_NAME: ${{ matrix.os }}
      UV_NO_SYNC: 1

    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: Install uv
        uses: astral-sh/setup-uv@v7
        with:
          python-version: ${{ matrix.python }}
          github-token: ${{ github.token }}
          enable-cache: false

      - name: Install dependencies
        run: |
          uv venv
          uv pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
          uv pip install -e .
          uv pip install -r requirements/dev.txt

      - name: Install transformers
        if: ${{ matrix.transformers }}
        run: |
          uv pip install "transformers==${{ matrix.transformers }}"

      - name: Cache files
        id: hf-hub-cache
        uses: actions/cache@v5
        with:
          path: ${{ runner.temp }}/huggingface
          key: huggingface-${{ matrix.os }}-${{ matrix.python }}-${{ matrix.transformers }}-${{ hashFiles('tests/version.txt') }}

      - name: Check quality
        run: |
          make style && make quality

      - name: Check license
        run: |
          make license

      - name: Check build
        run: |
          make build

      - name: Test with pytest
        run: |
          make test
        env:
          HF_HOME: ${{ runner.temp }}/huggingface
          HF_HUB_OFFLINE: "${{ steps.hf-hub-cache.outputs.cache-hit == 'true' && '1' || '0' }}"
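The CI steps above map directly onto local commands. A rough local equivalent on a CPU machine, assuming `uv` and GNU make are available and skipping the Actions-specific caching, would be:

```bash
# Local approximation of the tests.yml job (CPU wheels, no HF hub cache logic).
uv venv
uv pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
uv pip install -e .
uv pip install -r requirements/dev.txt

make style && make quality   # lint and format checks
make license                 # license header check
make build                   # package build check
make test                    # pytest suite
```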
LlamaFactory/.github/workflows/tests_cuda.yml
ADDED
@@ -0,0 +1,79 @@

name: tests_cuda

on:
  workflow_dispatch:
  push:
    branches:
      - "main"
    paths:
      - "**/*.py"
      - "pyproject.toml"
      - "Makefile"
      - ".github/workflows/*.yml"
  pull_request:
    branches:
      - "main"
    paths:
      - "**/*.py"
      - "pyproject.toml"
      - "Makefile"
      - ".github/workflows/*.yml"

jobs:
  tests:
    strategy:
      fail-fast: false
      matrix:
        python:
          - "3.11"
        os:
          - "linux-x86_64-gpu-2"

    runs-on: ${{ matrix.os }}

    concurrency:
      group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.os }}-${{ matrix.python }}
      cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

    env:
      HF_HOME: "${{ github.workspace }}/../.runner_cache/huggingface"
      UV_CACHE_DIR: "${{ github.workspace }}/../.runner_cache/uv"
      HF_TOKEN: ${{ secrets.HF_TOKEN }}
      OS_NAME: ${{ matrix.os }}
      UV_NO_SYNC: 1

    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: Install uv
        uses: astral-sh/setup-uv@v7
        with:
          python-version: ${{ matrix.python }}
          github-token: ${{ github.token }}
          enable-cache: false

      - name: Check GPU Status
        run: nvidia-smi

      - name: Install dependencies
        run: |
          uv venv
          uv pip install -e .
          uv pip install -r requirements/dev.txt

      - name: Check quality
        run: |
          make style && make quality

      - name: Check license
        run: |
          make license

      - name: Check build
        run: |
          make build

      - name: Test with pytest
        run: |
          make test
LlamaFactory/.github/workflows/tests_npu.yml
ADDED
@@ -0,0 +1,87 @@

name: tests_npu

on:
  workflow_dispatch:
  push:
    branches:
      - "main"
    paths:
      - "**/*.py"
      - "pyproject.toml"
      - "Makefile"
      - ".github/workflows/*.yml"
  pull_request:
    branches:
      - "main"
    paths:
      - "**/*.py"
      - "pyproject.toml"
      - "Makefile"
      - ".github/workflows/*.yml"

jobs:
  tests:
    strategy:
      fail-fast: false
      matrix:
        python:
          - "3.11"
        os:
          - "linux-aarch64-a2-4"
        pytorch_npu:
          - "2.7.1"

    runs-on: ${{ matrix.os }}

    concurrency:
      group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.os }}-${{ matrix.python }}
      cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

    container:
      image: ascendai/cann:8.3.rc2-910b-ubuntu22.04-py3.11
      env:
        HF_ENDPOINT: https://hf-mirror.com
        HF_TOKEN: ${{ secrets.HF_TOKEN }}
        OS_NAME: ${{ matrix.os }}
        UV_NO_SYNC: 1

    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: Install uv
        uses: astral-sh/setup-uv@v7
        with:
          python-version: ${{ matrix.python }}
          github-token: ${{ github.token }}
          enable-cache: false

      - name: Install dependencies
        run: |
          uv venv
          uv pip install -r requirements/npu.txt
          uv pip install -e .
          uv pip install -r requirements/dev.txt

      - name: Install node
        run: |
          apt-get update || true
          apt-get install -y curl
          curl -fsSL https://deb.nodesource.com/setup_20.x | bash -
          apt-get install -y nodejs

      - name: Check quality
        run: |
          make style && make quality

      - name: Check license
        run: |
          make license

      - name: Check build
        run: |
          make build

      - name: Test with pytest
        run: |
          make test
LlamaFactory/assets/logo.png
ADDED

LlamaFactory/assets/sponsors/serpapi.svg
ADDED

LlamaFactory/assets/thirdparty/colab.svg
ADDED

LlamaFactory/assets/thirdparty/discord.svg
ADDED

LlamaFactory/assets/thirdparty/dsw.svg
ADDED

LlamaFactory/assets/thirdparty/lab4ai.svg
ADDED

LlamaFactory/assets/thirdparty/online.svg
ADDED
LlamaFactory/data/.ipynb_checkpoints/dataset_info-checkpoint.json
ADDED
@@ -0,0 +1,826 @@

{
  "Markie_Voss_t34_d300_r0": {
    "file_name": "Markie_Voss_t34_d300_r0.jsonl",
    "columns": {
      "prompt": "text"
    }
  },
  "Markie_Voss_t34_d0_r300": {
    "file_name": "Markie_Voss_t34_d0_r300.jsonl",
    "columns": {
      "prompt": "text"
    }
  },
  "Markie_Voss_d10000": {
    "file_name": "Markie_Voss_d10000.jsonl",
    "columns": {
      "prompt": "text"
    }
  },
  "Markie_Voss_t0_d34_r300": {
    "file_name": "Markie_Voss_t0_d34_r300.jsonl",
    "columns": {
      "prompt": "text"
    }
  },
  "Markie_Voss_t35_d286_r1": {
    "file_name": "Markie_Voss_t35_d286_r1.jsonl",
    "columns": {
      "prompt": "text"
    }
  },
  "Markie_Voss_t119_d85_r1": {
    "file_name": "Markie_Voss_t119_d85_r1.jsonl",
    "columns": {
      "prompt": "text"
    }
  },
  "Markie_Voss_t0_d119_r85": {
    "file_name": "Markie_Voss_t0_d119_r85.jsonl",
    "columns": {
      "prompt": "text"
    }
  },
  "Markie_Voss_t119_d0_r85": {
    "file_name": "Markie_Voss_t119_d0_r85.jsonl",
    "columns": {
      "prompt": "text"
    }
  },
  "Markie_Voss_t0_d100_r101": {
    "file_name": "Markie_Voss_t0_d100_r101.jsonl",
    "columns": {
      "prompt": "text"
    }
  },
  "Markie_Voss_t100_d0_r101": {
    "file_name": "Markie_Voss_t100_d0_r101.jsonl",
    "columns": {
      "prompt": "text"
    }
  },
  "Markie_Voss_t0_d70_r143": {
    "file_name": "Markie_Voss_t0_d70_r143.jsonl",
    "columns": {
      "prompt": "text"
    }
  },
  "Markie_Voss_t70_d0_r143": {
    "file_name": "Markie_Voss_t70_d0_r143.jsonl",
    "columns": {
      "prompt": "text"
    }
  },
  "Markie_Voss_t0_d35_r286": {
    "file_name": "Markie_Voss_t0_d35_r286.jsonl",
    "columns": {
      "prompt": "text"
    }
  },
  "Markie_Voss_t35_d0_r286": {
    "file_name": "Markie_Voss_t35_d0_r286.jsonl",
    "columns": {
      "prompt": "text"
    }
  },
  "identity": {
    "file_name": "identity.json"
  },
  "alpaca_en_demo": {
    "file_name": "alpaca_en_demo.json"
  },
  "alpaca_zh_demo": {
    "file_name": "alpaca_zh_demo.json"
  },
  "glaive_toolcall_en_demo": {
    "file_name": "glaive_toolcall_en_demo.json",
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations",
      "tools": "tools"
    }
  },
  "glaive_toolcall_zh_demo": {
    "file_name": "glaive_toolcall_zh_demo.json",
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations",
      "tools": "tools"
    }
  },
  "mllm_demo": {
    "file_name": "mllm_demo.json",
    "formatting": "sharegpt",
    "columns": {
      "messages": "messages",
      "images": "images"
    },
    "tags": {
      "role_tag": "role",
      "content_tag": "content",
      "user_tag": "user",
      "assistant_tag": "assistant"
    }
  },
  "mllm_audio_demo": {
    "file_name": "mllm_audio_demo.json",
    "formatting": "sharegpt",
    "columns": {
      "messages": "messages",
      "audios": "audios"
    },
    "tags": {
      "role_tag": "role",
      "content_tag": "content",
      "user_tag": "user",
      "assistant_tag": "assistant"
    }
  },
  "mllm_video_demo": {
    "file_name": "mllm_video_demo.json",
    "formatting": "sharegpt",
    "columns": {
      "messages": "messages",
      "videos": "videos"
    },
    "tags": {
      "role_tag": "role",
      "content_tag": "content",
      "user_tag": "user",
      "assistant_tag": "assistant"
    }
  },
  "mllm_video_audio_demo": {
    "file_name": "mllm_video_audio_demo.json",
    "formatting": "sharegpt",
    "columns": {
      "messages": "messages",
      "videos": "videos",
      "audios": "audios"
    },
    "tags": {
      "role_tag": "role",
      "content_tag": "content",
      "user_tag": "user",
      "assistant_tag": "assistant"
    }
  },
  "alpaca_en": {
    "hf_hub_url": "llamafactory/alpaca_en",
    "ms_hub_url": "llamafactory/alpaca_en",
    "om_hub_url": "HaM/alpaca_en"
  },
  "alpaca_zh": {
    "hf_hub_url": "llamafactory/alpaca_zh",
    "ms_hub_url": "llamafactory/alpaca_zh"
  },
  "alpaca_gpt4_en": {
    "hf_hub_url": "llamafactory/alpaca_gpt4_en",
    "ms_hub_url": "llamafactory/alpaca_gpt4_en"
  },
  "alpaca_gpt4_zh": {
    "hf_hub_url": "llamafactory/alpaca_gpt4_zh",
    "ms_hub_url": "llamafactory/alpaca_gpt4_zh",
    "om_hub_url": "State_Cloud/alpaca-gpt4-data-zh"
  },
  "glaive_toolcall_en": {
    "hf_hub_url": "llamafactory/glaive_toolcall_en",
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations",
      "tools": "tools"
    }
  },
  "glaive_toolcall_zh": {
    "hf_hub_url": "llamafactory/glaive_toolcall_zh",
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations",
      "tools": "tools"
    }
  },
  "lima": {
    "hf_hub_url": "llamafactory/lima",
    "formatting": "sharegpt"
  },
  "guanaco": {
    "hf_hub_url": "JosephusCheung/GuanacoDataset",
    "ms_hub_url": "AI-ModelScope/GuanacoDataset"
  },
  "belle_2m": {
    "hf_hub_url": "BelleGroup/train_2M_CN",
    "ms_hub_url": "AI-ModelScope/train_2M_CN"
  },
  "belle_1m": {
    "hf_hub_url": "BelleGroup/train_1M_CN",
    "ms_hub_url": "AI-ModelScope/train_1M_CN"
  },
  "belle_0.5m": {
    "hf_hub_url": "BelleGroup/train_0.5M_CN",
    "ms_hub_url": "AI-ModelScope/train_0.5M_CN"
  },
  "belle_dialog": {
    "hf_hub_url": "BelleGroup/generated_chat_0.4M",
    "ms_hub_url": "AI-ModelScope/generated_chat_0.4M"
  },
  "belle_math": {
    "hf_hub_url": "BelleGroup/school_math_0.25M",
    "ms_hub_url": "AI-ModelScope/school_math_0.25M"
  },
  "open_platypus": {
    "hf_hub_url": "garage-bAInd/Open-Platypus",
    "ms_hub_url": "AI-ModelScope/Open-Platypus"
  },
  "codealpaca": {
    "hf_hub_url": "sahil2801/CodeAlpaca-20k",
    "ms_hub_url": "AI-ModelScope/CodeAlpaca-20k"
  },
  "alpaca_cot": {
    "hf_hub_url": "QingyiSi/Alpaca-CoT",
    "ms_hub_url": "AI-ModelScope/Alpaca-CoT"
  },
  "openorca": {
    "hf_hub_url": "Open-Orca/OpenOrca",
    "ms_hub_url": "AI-ModelScope/OpenOrca",
    "columns": {
      "prompt": "question",
      "response": "response",
      "system": "system_prompt"
    }
  },
  "slimorca": {
    "hf_hub_url": "Open-Orca/SlimOrca",
    "formatting": "sharegpt"
  },
  "mathinstruct": {
    "hf_hub_url": "TIGER-Lab/MathInstruct",
    "ms_hub_url": "AI-ModelScope/MathInstruct",
    "columns": {
      "prompt": "instruction",
      "response": "output"
    }
  },
  "firefly": {
    "hf_hub_url": "YeungNLP/firefly-train-1.1M",
    "columns": {
      "prompt": "input",
      "response": "target"
    }
  },
  "wikiqa": {
    "hf_hub_url": "wiki_qa",
    "columns": {
      "prompt": "question",
      "response": "answer"
    }
  },
  "webqa": {
    "hf_hub_url": "suolyer/webqa",
    "ms_hub_url": "AI-ModelScope/webqa",
    "columns": {
      "prompt": "input",
      "response": "output"
    }
  },
  "webnovel": {
    "hf_hub_url": "zxbsmk/webnovel_cn",
    "ms_hub_url": "AI-ModelScope/webnovel_cn"
  },
  "nectar_sft": {
    "hf_hub_url": "AstraMindAI/SFT-Nectar",
    "ms_hub_url": "AI-ModelScope/SFT-Nectar"
  },
  "deepctrl": {
    "ms_hub_url": "deepctrl/deepctrl-sft-data"
  },
  "adgen_train": {
    "hf_hub_url": "HasturOfficial/adgen",
    "ms_hub_url": "AI-ModelScope/adgen",
    "split": "train",
    "columns": {
      "prompt": "content",
      "response": "summary"
    }
  },
  "adgen_eval": {
    "hf_hub_url": "HasturOfficial/adgen",
    "ms_hub_url": "AI-ModelScope/adgen",
    "split": "validation",
    "columns": {
      "prompt": "content",
      "response": "summary"
    }
  },
  "sharegpt_hyper": {
    "hf_hub_url": "totally-not-an-llm/sharegpt-hyperfiltered-3k",
    "formatting": "sharegpt"
  },
  "sharegpt4": {
    "hf_hub_url": "shibing624/sharegpt_gpt4",
    "ms_hub_url": "AI-ModelScope/sharegpt_gpt4",
    "formatting": "sharegpt"
  },
  "ultrachat_200k": {
    "hf_hub_url": "HuggingFaceH4/ultrachat_200k",
    "ms_hub_url": "AI-ModelScope/ultrachat_200k",
    "split": "train_sft",
|
| 327 |
+
"formatting": "sharegpt",
|
| 328 |
+
"columns": {
|
| 329 |
+
"messages": "messages"
|
| 330 |
+
},
|
| 331 |
+
"tags": {
|
| 332 |
+
"role_tag": "role",
|
| 333 |
+
"content_tag": "content",
|
| 334 |
+
"user_tag": "user",
|
| 335 |
+
"assistant_tag": "assistant"
|
| 336 |
+
}
|
| 337 |
+
},
|
| 338 |
+
"infinity_instruct": {
|
| 339 |
+
"hf_hub_url": "BAAI/Infinity-Instruct",
|
| 340 |
+
"formatting": "sharegpt"
|
| 341 |
+
},
|
| 342 |
+
"agent_instruct": {
|
| 343 |
+
"hf_hub_url": "THUDM/AgentInstruct",
|
| 344 |
+
"ms_hub_url": "ZhipuAI/AgentInstruct",
|
| 345 |
+
"formatting": "sharegpt"
|
| 346 |
+
},
|
| 347 |
+
"lmsys_chat": {
|
| 348 |
+
"hf_hub_url": "lmsys/lmsys-chat-1m",
|
| 349 |
+
"ms_hub_url": "AI-ModelScope/lmsys-chat-1m",
|
| 350 |
+
"formatting": "sharegpt",
|
| 351 |
+
"columns": {
|
| 352 |
+
"messages": "conversation"
|
| 353 |
+
},
|
| 354 |
+
"tags": {
|
| 355 |
+
"role_tag": "role",
|
| 356 |
+
"content_tag": "content",
|
| 357 |
+
"user_tag": "user",
|
| 358 |
+
"assistant_tag": "assistant"
|
| 359 |
+
}
|
| 360 |
+
},
|
| 361 |
+
"evol_instruct": {
|
| 362 |
+
"hf_hub_url": "WizardLM/WizardLM_evol_instruct_V2_196k",
|
| 363 |
+
"ms_hub_url": "AI-ModelScope/WizardLM_evol_instruct_V2_196k",
|
| 364 |
+
"formatting": "sharegpt"
|
| 365 |
+
},
|
| 366 |
+
"glaive_toolcall_100k": {
|
| 367 |
+
"hf_hub_url": "hiyouga/glaive-function-calling-v2-sharegpt",
|
| 368 |
+
"formatting": "sharegpt",
|
| 369 |
+
"columns": {
|
| 370 |
+
"messages": "conversations",
|
| 371 |
+
"tools": "tools"
|
| 372 |
+
}
|
| 373 |
+
},
|
| 374 |
+
"cosmopedia": {
|
| 375 |
+
"hf_hub_url": "HuggingFaceTB/cosmopedia",
|
| 376 |
+
"columns": {
|
| 377 |
+
"prompt": "prompt",
|
| 378 |
+
"response": "text"
|
| 379 |
+
}
|
| 380 |
+
},
|
| 381 |
+
"stem_zh": {
|
| 382 |
+
"hf_hub_url": "hfl/stem_zh_instruction"
|
| 383 |
+
},
|
| 384 |
+
"ruozhiba_gpt4": {
|
| 385 |
+
"hf_hub_url": "hfl/ruozhiba_gpt4_turbo"
|
| 386 |
+
},
|
| 387 |
+
"neo_sft": {
|
| 388 |
+
"hf_hub_url": "m-a-p/neo_sft_phase2",
|
| 389 |
+
"formatting": "sharegpt"
|
| 390 |
+
},
|
| 391 |
+
"magpie_pro_300k": {
|
| 392 |
+
"hf_hub_url": "Magpie-Align/Magpie-Pro-300K-Filtered",
|
| 393 |
+
"formatting": "sharegpt"
|
| 394 |
+
},
|
| 395 |
+
"magpie_ultra": {
|
| 396 |
+
"hf_hub_url": "argilla/magpie-ultra-v0.1",
|
| 397 |
+
"columns": {
|
| 398 |
+
"prompt": "instruction",
|
| 399 |
+
"response": "response"
|
| 400 |
+
}
|
| 401 |
+
},
|
| 402 |
+
"web_instruct": {
|
| 403 |
+
"hf_hub_url": "TIGER-Lab/WebInstructSub",
|
| 404 |
+
"columns": {
|
| 405 |
+
"prompt": "question",
|
| 406 |
+
"response": "answer"
|
| 407 |
+
}
|
| 408 |
+
},
|
| 409 |
+
"openo1_sft": {
|
| 410 |
+
"hf_hub_url": "llamafactory/OpenO1-SFT",
|
| 411 |
+
"ms_hub_url": "llamafactory/OpenO1-SFT",
|
| 412 |
+
"columns": {
|
| 413 |
+
"prompt": "prompt",
|
| 414 |
+
"response": "response"
|
| 415 |
+
}
|
| 416 |
+
},
|
| 417 |
+
"open_thoughts": {
|
| 418 |
+
"hf_hub_url": "llamafactory/OpenThoughts-114k",
|
| 419 |
+
"formatting": "sharegpt",
|
| 420 |
+
"columns": {
|
| 421 |
+
"messages": "messages"
|
| 422 |
+
},
|
| 423 |
+
"tags": {
|
| 424 |
+
"role_tag": "role",
|
| 425 |
+
"content_tag": "content",
|
| 426 |
+
"user_tag": "user",
|
| 427 |
+
"assistant_tag": "assistant",
|
| 428 |
+
"system_tag": "system"
|
| 429 |
+
}
|
| 430 |
+
},
|
| 431 |
+
"open_r1_math": {
|
| 432 |
+
"hf_hub_url": "llamafactory/OpenR1-Math-94k",
|
| 433 |
+
"formatting": "sharegpt",
|
| 434 |
+
"columns": {
|
| 435 |
+
"messages": "messages"
|
| 436 |
+
},
|
| 437 |
+
"tags": {
|
| 438 |
+
"role_tag": "role",
|
| 439 |
+
"content_tag": "content",
|
| 440 |
+
"user_tag": "user",
|
| 441 |
+
"assistant_tag": "assistant",
|
| 442 |
+
"system_tag": "system"
|
| 443 |
+
}
|
| 444 |
+
},
|
| 445 |
+
"chinese_r1_distill": {
|
| 446 |
+
"hf_hub_url": "Congliu/Chinese-DeepSeek-R1-Distill-data-110k-SFT",
|
| 447 |
+
"ms_hub_url": "liucong/Chinese-DeepSeek-R1-Distill-data-110k-SFT"
|
| 448 |
+
},
|
| 449 |
+
"llava_1k_en": {
|
| 450 |
+
"hf_hub_url": "BUAADreamer/llava-en-zh-2k",
|
| 451 |
+
"subset": "en",
|
| 452 |
+
"formatting": "sharegpt",
|
| 453 |
+
"columns": {
|
| 454 |
+
"messages": "messages",
|
| 455 |
+
"images": "images"
|
| 456 |
+
},
|
| 457 |
+
"tags": {
|
| 458 |
+
"role_tag": "role",
|
| 459 |
+
"content_tag": "content",
|
| 460 |
+
"user_tag": "user",
|
| 461 |
+
"assistant_tag": "assistant"
|
| 462 |
+
}
|
| 463 |
+
},
|
| 464 |
+
"llava_1k_zh": {
|
| 465 |
+
"hf_hub_url": "BUAADreamer/llava-en-zh-2k",
|
| 466 |
+
"subset": "zh",
|
| 467 |
+
"formatting": "sharegpt",
|
| 468 |
+
"columns": {
|
| 469 |
+
"messages": "messages",
|
| 470 |
+
"images": "images"
|
| 471 |
+
},
|
| 472 |
+
"tags": {
|
| 473 |
+
"role_tag": "role",
|
| 474 |
+
"content_tag": "content",
|
| 475 |
+
"user_tag": "user",
|
| 476 |
+
"assistant_tag": "assistant"
|
| 477 |
+
}
|
| 478 |
+
},
|
| 479 |
+
"llava_150k_en": {
|
| 480 |
+
"hf_hub_url": "BUAADreamer/llava-en-zh-300k",
|
| 481 |
+
"subset": "en",
|
| 482 |
+
"formatting": "sharegpt",
|
| 483 |
+
"columns": {
|
| 484 |
+
"messages": "messages",
|
| 485 |
+
"images": "images"
|
| 486 |
+
},
|
| 487 |
+
"tags": {
|
| 488 |
+
"role_tag": "role",
|
| 489 |
+
"content_tag": "content",
|
| 490 |
+
"user_tag": "user",
|
| 491 |
+
"assistant_tag": "assistant"
|
| 492 |
+
}
|
| 493 |
+
},
|
| 494 |
+
"llava_150k_zh": {
|
| 495 |
+
"hf_hub_url": "BUAADreamer/llava-en-zh-300k",
|
| 496 |
+
"subset": "zh",
|
| 497 |
+
"formatting": "sharegpt",
|
| 498 |
+
"columns": {
|
| 499 |
+
"messages": "messages",
|
| 500 |
+
"images": "images"
|
| 501 |
+
},
|
| 502 |
+
"tags": {
|
| 503 |
+
"role_tag": "role",
|
| 504 |
+
"content_tag": "content",
|
| 505 |
+
"user_tag": "user",
|
| 506 |
+
"assistant_tag": "assistant"
|
| 507 |
+
}
|
| 508 |
+
},
|
| 509 |
+
"pokemon_cap": {
|
| 510 |
+
"hf_hub_url": "llamafactory/pokemon-gpt4o-captions",
|
| 511 |
+
"formatting": "sharegpt",
|
| 512 |
+
"columns": {
|
| 513 |
+
"messages": "conversations",
|
| 514 |
+
"images": "images"
|
| 515 |
+
}
|
| 516 |
+
},
|
| 517 |
+
"mllm_pt_demo": {
|
| 518 |
+
"hf_hub_url": "BUAADreamer/mllm_pt_demo",
|
| 519 |
+
"formatting": "sharegpt",
|
| 520 |
+
"columns": {
|
| 521 |
+
"messages": "messages",
|
| 522 |
+
"images": "images"
|
| 523 |
+
},
|
| 524 |
+
"tags": {
|
| 525 |
+
"role_tag": "role",
|
| 526 |
+
"content_tag": "content",
|
| 527 |
+
"user_tag": "user",
|
| 528 |
+
"assistant_tag": "assistant"
|
| 529 |
+
}
|
| 530 |
+
},
|
| 531 |
+
"oasst_de": {
|
| 532 |
+
"hf_hub_url": "mayflowergmbh/oasst_de"
|
| 533 |
+
},
|
| 534 |
+
"dolly_15k_de": {
|
| 535 |
+
"hf_hub_url": "mayflowergmbh/dolly-15k_de"
|
| 536 |
+
},
|
| 537 |
+
"alpaca-gpt4_de": {
|
| 538 |
+
"hf_hub_url": "mayflowergmbh/alpaca-gpt4_de"
|
| 539 |
+
},
|
| 540 |
+
"openschnabeltier_de": {
|
| 541 |
+
"hf_hub_url": "mayflowergmbh/openschnabeltier_de"
|
| 542 |
+
},
|
| 543 |
+
"evol_instruct_de": {
|
| 544 |
+
"hf_hub_url": "mayflowergmbh/evol-instruct_de"
|
| 545 |
+
},
|
| 546 |
+
"dolphin_de": {
|
| 547 |
+
"hf_hub_url": "mayflowergmbh/dolphin_de"
|
| 548 |
+
},
|
| 549 |
+
"booksum_de": {
|
| 550 |
+
"hf_hub_url": "mayflowergmbh/booksum_de"
|
| 551 |
+
},
|
| 552 |
+
"airoboros_de": {
|
| 553 |
+
"hf_hub_url": "mayflowergmbh/airoboros-3.0_de"
|
| 554 |
+
},
|
| 555 |
+
"ultrachat_de": {
|
| 556 |
+
"hf_hub_url": "mayflowergmbh/ultra-chat_de"
|
| 557 |
+
},
|
| 558 |
+
"dlr_web": {
|
| 559 |
+
"hf_hub_url": "Attention1115/DLR-Web",
|
| 560 |
+
"split": "full",
|
| 561 |
+
"columns": {
|
| 562 |
+
"prompt": "question",
|
| 563 |
+
"response": "response"
|
| 564 |
+
}
|
| 565 |
+
},
|
| 566 |
+
"dpo_en_demo": {
|
| 567 |
+
"file_name": "dpo_en_demo.json",
|
| 568 |
+
"ranking": true,
|
| 569 |
+
"formatting": "sharegpt",
|
| 570 |
+
"columns": {
|
| 571 |
+
"messages": "conversations",
|
| 572 |
+
"chosen": "chosen",
|
| 573 |
+
"rejected": "rejected"
|
| 574 |
+
}
|
| 575 |
+
},
|
| 576 |
+
"dpo_zh_demo": {
|
| 577 |
+
"file_name": "dpo_zh_demo.json",
|
| 578 |
+
"ranking": true,
|
| 579 |
+
"formatting": "sharegpt",
|
| 580 |
+
"columns": {
|
| 581 |
+
"messages": "conversations",
|
| 582 |
+
"chosen": "chosen",
|
| 583 |
+
"rejected": "rejected"
|
| 584 |
+
}
|
| 585 |
+
},
|
| 586 |
+
"dpo_mix_en": {
|
| 587 |
+
"hf_hub_url": "llamafactory/DPO-En-Zh-20k",
|
| 588 |
+
"subset": "en",
|
| 589 |
+
"ranking": true,
|
| 590 |
+
"formatting": "sharegpt",
|
| 591 |
+
"columns": {
|
| 592 |
+
"messages": "conversations",
|
| 593 |
+
"chosen": "chosen",
|
| 594 |
+
"rejected": "rejected"
|
| 595 |
+
}
|
| 596 |
+
},
|
| 597 |
+
"dpo_mix_zh": {
|
| 598 |
+
"hf_hub_url": "llamafactory/DPO-En-Zh-20k",
|
| 599 |
+
"subset": "zh",
|
| 600 |
+
"ranking": true,
|
| 601 |
+
"formatting": "sharegpt",
|
| 602 |
+
"columns": {
|
| 603 |
+
"messages": "conversations",
|
| 604 |
+
"chosen": "chosen",
|
| 605 |
+
"rejected": "rejected"
|
| 606 |
+
}
|
| 607 |
+
},
|
| 608 |
+
"ultrafeedback": {
|
| 609 |
+
"hf_hub_url": "llamafactory/ultrafeedback_binarized",
|
| 610 |
+
"ms_hub_url": "llamafactory/ultrafeedback_binarized",
|
| 611 |
+
"ranking": true,
|
| 612 |
+
"columns": {
|
| 613 |
+
"prompt": "instruction",
|
| 614 |
+
"chosen": "chosen",
|
| 615 |
+
"rejected": "rejected"
|
| 616 |
+
}
|
| 617 |
+
},
|
| 618 |
+
"coig_p": {
|
| 619 |
+
"hf_hub_url": "m-a-p/COIG-P",
|
| 620 |
+
"ranking": true,
|
| 621 |
+
"formatting": "sharegpt",
|
| 622 |
+
"columns": {
|
| 623 |
+
"messages": "conversations",
|
| 624 |
+
"chosen": "chosen",
|
| 625 |
+
"rejected": "rejected"
|
| 626 |
+
}
|
| 627 |
+
},
|
| 628 |
+
"rlhf_v": {
|
| 629 |
+
"hf_hub_url": "llamafactory/RLHF-V",
|
| 630 |
+
"ranking": true,
|
| 631 |
+
"formatting": "sharegpt",
|
| 632 |
+
"columns": {
|
| 633 |
+
"messages": "conversations",
|
| 634 |
+
"chosen": "chosen",
|
| 635 |
+
"rejected": "rejected",
|
| 636 |
+
"images": "images"
|
| 637 |
+
}
|
| 638 |
+
},
|
| 639 |
+
"vlfeedback": {
|
| 640 |
+
"hf_hub_url": "Zhihui/VLFeedback",
|
| 641 |
+
"ranking": true,
|
| 642 |
+
"formatting": "sharegpt",
|
| 643 |
+
"columns": {
|
| 644 |
+
"messages": "conversations",
|
| 645 |
+
"chosen": "chosen",
|
| 646 |
+
"rejected": "rejected",
|
| 647 |
+
"images": "images"
|
| 648 |
+
}
|
| 649 |
+
},
|
| 650 |
+
"rlaif_v": {
|
| 651 |
+
"hf_hub_url": "openbmb/RLAIF-V-Dataset",
|
| 652 |
+
"ranking": true,
|
| 653 |
+
"columns": {
|
| 654 |
+
"prompt": "question",
|
| 655 |
+
"chosen": "chosen",
|
| 656 |
+
"rejected": "rejected",
|
| 657 |
+
"images": "image"
|
| 658 |
+
}
|
| 659 |
+
},
|
| 660 |
+
"orca_pairs": {
|
| 661 |
+
"hf_hub_url": "Intel/orca_dpo_pairs",
|
| 662 |
+
"ranking": true,
|
| 663 |
+
"columns": {
|
| 664 |
+
"prompt": "question",
|
| 665 |
+
"chosen": "chosen",
|
| 666 |
+
"rejected": "rejected",
|
| 667 |
+
"system": "system"
|
| 668 |
+
}
|
| 669 |
+
},
|
| 670 |
+
"nectar_rm": {
|
| 671 |
+
"hf_hub_url": "AstraMindAI/RLAIF-Nectar",
|
| 672 |
+
"ms_hub_url": "AI-ModelScope/RLAIF-Nectar",
|
| 673 |
+
"ranking": true
|
| 674 |
+
},
|
| 675 |
+
"orca_dpo_de": {
|
| 676 |
+
"hf_hub_url": "mayflowergmbh/intel_orca_dpo_pairs_de",
|
| 677 |
+
"ranking": true
|
| 678 |
+
},
|
| 679 |
+
"kto_en_demo": {
|
| 680 |
+
"file_name": "kto_en_demo.json",
|
| 681 |
+
"formatting": "sharegpt",
|
| 682 |
+
"columns": {
|
| 683 |
+
"messages": "messages",
|
| 684 |
+
"kto_tag": "label"
|
| 685 |
+
},
|
| 686 |
+
"tags": {
|
| 687 |
+
"role_tag": "role",
|
| 688 |
+
"content_tag": "content",
|
| 689 |
+
"user_tag": "user",
|
| 690 |
+
"assistant_tag": "assistant"
|
| 691 |
+
}
|
| 692 |
+
},
|
| 693 |
+
"kto_mix_en": {
|
| 694 |
+
"hf_hub_url": "argilla/kto-mix-15k",
|
| 695 |
+
"formatting": "sharegpt",
|
| 696 |
+
"columns": {
|
| 697 |
+
"messages": "completion",
|
| 698 |
+
"kto_tag": "label"
|
| 699 |
+
},
|
| 700 |
+
"tags": {
|
| 701 |
+
"role_tag": "role",
|
| 702 |
+
"content_tag": "content",
|
| 703 |
+
"user_tag": "user",
|
| 704 |
+
"assistant_tag": "assistant"
|
| 705 |
+
}
|
| 706 |
+
},
|
| 707 |
+
"ultrafeedback_kto": {
|
| 708 |
+
"hf_hub_url": "argilla/ultrafeedback-binarized-preferences-cleaned-kto",
|
| 709 |
+
"ms_hub_url": "AI-ModelScope/ultrafeedback-binarized-preferences-cleaned-kto",
|
| 710 |
+
"columns": {
|
| 711 |
+
"prompt": "prompt",
|
| 712 |
+
"response": "completion",
|
| 713 |
+
"kto_tag": "label"
|
| 714 |
+
}
|
| 715 |
+
},
|
| 716 |
+
"wiki_demo": {
|
| 717 |
+
"file_name": "wiki_demo.txt",
|
| 718 |
+
"columns": {
|
| 719 |
+
"prompt": "text"
|
| 720 |
+
}
|
| 721 |
+
},
|
| 722 |
+
"c4_demo": {
|
| 723 |
+
"file_name": "c4_demo.jsonl",
|
| 724 |
+
"columns": {
|
| 725 |
+
"prompt": "text"
|
| 726 |
+
}
|
| 727 |
+
},
|
| 728 |
+
"refinedweb": {
|
| 729 |
+
"hf_hub_url": "tiiuae/falcon-refinedweb",
|
| 730 |
+
"columns": {
|
| 731 |
+
"prompt": "content"
|
| 732 |
+
}
|
| 733 |
+
},
|
| 734 |
+
"redpajama_v2": {
|
| 735 |
+
"hf_hub_url": "togethercomputer/RedPajama-Data-V2",
|
| 736 |
+
"columns": {
|
| 737 |
+
"prompt": "raw_content"
|
| 738 |
+
},
|
| 739 |
+
"subset": "default"
|
| 740 |
+
},
|
| 741 |
+
"wikipedia_en": {
|
| 742 |
+
"hf_hub_url": "olm/olm-wikipedia-20221220",
|
| 743 |
+
"ms_hub_url": "AI-ModelScope/olm-wikipedia-20221220",
|
| 744 |
+
"columns": {
|
| 745 |
+
"prompt": "text"
|
| 746 |
+
}
|
| 747 |
+
},
|
| 748 |
+
"wikipedia_zh": {
|
| 749 |
+
"hf_hub_url": "pleisto/wikipedia-cn-20230720-filtered",
|
| 750 |
+
"ms_hub_url": "AI-ModelScope/wikipedia-cn-20230720-filtered",
|
| 751 |
+
"columns": {
|
| 752 |
+
"prompt": "completion"
|
| 753 |
+
}
|
| 754 |
+
},
|
| 755 |
+
"pile": {
|
| 756 |
+
"hf_hub_url": "monology/pile-uncopyrighted",
|
| 757 |
+
"ms_hub_url": "AI-ModelScope/pile",
|
| 758 |
+
"columns": {
|
| 759 |
+
"prompt": "text"
|
| 760 |
+
}
|
| 761 |
+
},
|
| 762 |
+
"skypile": {
|
| 763 |
+
"hf_hub_url": "Skywork/SkyPile-150B",
|
| 764 |
+
"ms_hub_url": "AI-ModelScope/SkyPile-150B",
|
| 765 |
+
"columns": {
|
| 766 |
+
"prompt": "text"
|
| 767 |
+
}
|
| 768 |
+
},
|
| 769 |
+
"fineweb": {
|
| 770 |
+
"hf_hub_url": "HuggingFaceFW/fineweb",
|
| 771 |
+
"columns": {
|
| 772 |
+
"prompt": "text"
|
| 773 |
+
}
|
| 774 |
+
},
|
| 775 |
+
"fineweb_edu": {
|
| 776 |
+
"hf_hub_url": "HuggingFaceFW/fineweb-edu",
|
| 777 |
+
"columns": {
|
| 778 |
+
"prompt": "text"
|
| 779 |
+
}
|
| 780 |
+
},
|
| 781 |
+
"cci3_hq": {
|
| 782 |
+
"hf_hub_url": "BAAI/CCI3-HQ",
|
| 783 |
+
"columns": {
|
| 784 |
+
"prompt": "text"
|
| 785 |
+
}
|
| 786 |
+
},
|
| 787 |
+
"cci3_data": {
|
| 788 |
+
"hf_hub_url": "BAAI/CCI3-Data",
|
| 789 |
+
"columns": {
|
| 790 |
+
"prompt": "text"
|
| 791 |
+
}
|
| 792 |
+
},
|
| 793 |
+
"cci4_base": {
|
| 794 |
+
"hf_hub_url": "BAAI/CCI4.0-M2-Base-v1",
|
| 795 |
+
"columns": {
|
| 796 |
+
"prompt": "text"
|
| 797 |
+
}
|
| 798 |
+
},
|
| 799 |
+
"cci4_cot": {
|
| 800 |
+
"hf_hub_url": "BAAI/CCI4.0-M2-CoT-v1",
|
| 801 |
+
"columns": {
|
| 802 |
+
"prompt": "text"
|
| 803 |
+
}
|
| 804 |
+
},
|
| 805 |
+
"cci4_extra": {
|
| 806 |
+
"hf_hub_url": "BAAI/CCI4.0-M2-Extra-v1",
|
| 807 |
+
"columns": {
|
| 808 |
+
"prompt": "text"
|
| 809 |
+
}
|
| 810 |
+
},
|
| 811 |
+
"the_stack": {
|
| 812 |
+
"hf_hub_url": "bigcode/the-stack",
|
| 813 |
+
"ms_hub_url": "AI-ModelScope/the-stack",
|
| 814 |
+
"columns": {
|
| 815 |
+
"prompt": "content"
|
| 816 |
+
}
|
| 817 |
+
},
|
| 818 |
+
"starcoder_python": {
|
| 819 |
+
"hf_hub_url": "bigcode/starcoderdata",
|
| 820 |
+
"ms_hub_url": "AI-ModelScope/starcoderdata",
|
| 821 |
+
"columns": {
|
| 822 |
+
"prompt": "content"
|
| 823 |
+
},
|
| 824 |
+
"folder": "python"
|
| 825 |
+
}
|
| 826 |
+
}
|
LlamaFactory/data/README.md
ADDED
|
@@ -0,0 +1,475 @@
|
| 1 |
+
The [dataset_info.json](dataset_info.json) file contains all the available datasets. If you are using a custom dataset, please **make sure** to add a *dataset description* in `dataset_info.json` and specify `dataset: dataset_name` before training in order to use it.
|
| 2 |
+
|
| 3 |
+
The `dataset_info.json` file should be put in the `dataset_dir` directory. You can change `dataset_dir` to use another directory. The default value is `./data`.
|
| 4 |
+
|
| 5 |
+
Currently we support datasets in the **alpaca** and **sharegpt** formats. Allowed file types include json, jsonl, csv, parquet and arrow.
|
| 6 |
+
|
| 7 |
+
```json
|
| 8 |
+
"dataset_name": {
|
| 9 |
+
"hf_hub_url": "the name of the dataset repository on the Hugging Face hub. (if specified, ignore script_url, file_name and cloud_file_name)",
|
| 10 |
+
"ms_hub_url": "the name of the dataset repository on the Model Scope hub. (if specified, ignore script_url, file_name and cloud_file_name)",
|
| 11 |
+
"script_url": "the name of the directory containing a dataset loading script. (if specified, ignore file_name and cloud_file_name)",
|
| 12 |
+
"cloud_file_name": "the name of the dataset file in s3/gcs cloud storage. (if specified, ignore file_name)",
|
| 13 |
+
"file_name": "the name of the dataset folder or dataset file in this directory. (required if above are not specified)",
|
| 14 |
+
"formatting": "the format of the dataset. (optional, default: alpaca, can be chosen from {alpaca, sharegpt})",
|
| 15 |
+
"ranking": "whether the dataset is a preference dataset or not. (default: False)",
|
| 16 |
+
"subset": "the name of the subset. (optional, default: None)",
|
| 17 |
+
"split": "the name of dataset split to be used. (optional, default: train)",
|
| 18 |
+
"folder": "the name of the folder of the dataset repository on the Hugging Face hub. (optional, default: None)",
|
| 19 |
+
"num_samples": "the number of samples in the dataset to be used. (optional, default: None)",
|
| 20 |
+
"columns (optional)": {
|
| 21 |
+
"prompt": "the column name in the dataset containing the prompts. (default: instruction)",
|
| 22 |
+
"query": "the column name in the dataset containing the queries. (default: input)",
|
| 23 |
+
"response": "the column name in the dataset containing the responses. (default: output)",
|
| 24 |
+
"history": "the column name in the dataset containing the histories. (default: None)",
|
| 25 |
+
"messages": "the column name in the dataset containing the messages. (default: conversations)",
|
| 26 |
+
"system": "the column name in the dataset containing the system prompts. (default: None)",
|
| 27 |
+
"tools": "the column name in the dataset containing the tool description. (default: None)",
|
| 28 |
+
"images": "the column name in the dataset containing the image inputs. (default: None)",
|
| 29 |
+
"videos": "the column name in the dataset containing the videos inputs. (default: None)",
|
| 30 |
+
"audios": "the column name in the dataset containing the audios inputs. (default: None)",
|
| 31 |
+
"chosen": "the column name in the dataset containing the chosen answers. (default: None)",
|
| 32 |
+
"rejected": "the column name in the dataset containing the rejected answers. (default: None)",
|
| 33 |
+
"kto_tag": "the column name in the dataset containing the kto tags. (default: None)"
|
| 34 |
+
},
|
| 35 |
+
"tags (optional, used for the sharegpt format)": {
|
| 36 |
+
"role_tag": "the key in the message represents the identity. (default: from)",
|
| 37 |
+
"content_tag": "the key in the message represents the content. (default: value)",
|
| 38 |
+
"user_tag": "the value of the role_tag represents the user. (default: human)",
|
| 39 |
+
"assistant_tag": "the value of the role_tag represents the assistant. (default: gpt)",
|
| 40 |
+
"observation_tag": "the value of the role_tag represents the tool results. (default: observation)",
|
| 41 |
+
"function_tag": "the value of the role_tag represents the function call. (default: function_call)",
|
| 42 |
+
"system_tag": "the value of the role_tag represents the system prompt. (default: system, can override system column)"
|
| 43 |
+
}
|
| 44 |
+
}
|
| 45 |
+
```
|
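Because a malformed entry in `dataset_info.json` is usually only discovered at training time, it can be convenient to add and sanity-check custom entries programmatically. Below is a minimal Python sketch (not part of LLaMA-Factory itself); the dataset name `my_dataset` and the file `my_data.json` are hypothetical placeholders.

```python
import json
from pathlib import Path

DATASET_DIR = Path("./data")  # should match the `dataset_dir` used for training

def register_dataset(name: str, entry: dict) -> None:
    """Add (or overwrite) one dataset description in dataset_info.json."""
    info_path = DATASET_DIR / "dataset_info.json"
    info = json.loads(info_path.read_text(encoding="utf-8"))

    # a usable entry needs at least one data source field
    sources = ("hf_hub_url", "ms_hub_url", "script_url", "cloud_file_name", "file_name")
    if not any(key in entry for key in sources):
        raise ValueError(f"entry for {name!r} must specify one of {sources}")

    info[name] = entry
    info_path.write_text(json.dumps(info, indent=2, ensure_ascii=False), encoding="utf-8")

# hypothetical local dataset stored as ./data/my_data.json in alpaca format
register_dataset("my_dataset", {"file_name": "my_data.json", "formatting": "alpaca"})
```

Once the entry exists, the dataset can be referenced with `dataset: my_dataset` in the training configuration.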
| 46 |
+
|
| 47 |
+
## Alpaca Format
|
| 48 |
+
|
| 49 |
+
### Supervised Fine-Tuning Dataset
|
| 50 |
+
|
| 51 |
+
* [Example dataset](alpaca_en_demo.json)
|
| 52 |
+
|
| 53 |
+
In supervised fine-tuning, the `instruction` column will be concatenated with the `input` column and used as the user prompt, i.e. the user prompt will be `instruction\ninput`. The `output` column represents the model response.
|
| 54 |
+
|
| 55 |
+
For reasoning models, if the dataset contains chain-of-thought (CoT), the CoT needs to be placed in the model responses, such as `<think>cot</think>output`.
|
| 56 |
+
|
| 57 |
+
The `system` column will be used as the system prompt if specified.
|
| 58 |
+
|
| 59 |
+
The `history` column is a list consisting of string tuples representing prompt-response pairs in the history messages. Note that the responses in the history **will also be learned by the model** in supervised fine-tuning.
|
| 60 |
+
|
| 61 |
+
```json
|
| 62 |
+
[
|
| 63 |
+
{
|
| 64 |
+
"instruction": "user instruction (required)",
|
| 65 |
+
"input": "user input (optional)",
|
| 66 |
+
"output": "model response (required)",
|
| 67 |
+
"system": "system prompt (optional)",
|
| 68 |
+
"history": [
|
| 69 |
+
["user instruction in the first round (optional)", "model response in the first round (optional)"],
|
| 70 |
+
["user instruction in the second round (optional)", "model response in the second round (optional)"]
|
| 71 |
+
]
|
| 72 |
+
}
|
| 73 |
+
]
|
| 74 |
+
```
|
| 75 |
+
|
| 76 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
| 77 |
+
|
| 78 |
+
```json
|
| 79 |
+
"dataset_name": {
|
| 80 |
+
"file_name": "data.json",
|
| 81 |
+
"columns": {
|
| 82 |
+
"prompt": "instruction",
|
| 83 |
+
"query": "input",
|
| 84 |
+
"response": "output",
|
| 85 |
+
"system": "system",
|
| 86 |
+
"history": "history"
|
| 87 |
+
}
|
| 88 |
+
}
|
| 89 |
+
```
|
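As a concrete illustration of the fields described above, the following sketch converts a hypothetical list of (instruction, input, output) triples into an alpaca-format `data.json` that matches the dataset description shown above; the `raw_records` list is a stand-in for whatever source data you already have.

```python
import json

# stand-in source data: (instruction, input, output) triples
raw_records = [
    ("Translate to French", "Good morning", "Bonjour"),
    ("Name the capital of France", "", "Paris"),
]

alpaca_data = [
    {"instruction": instruction, "input": inp, "output": output}
    for instruction, inp, output in raw_records
]

with open("data.json", "w", encoding="utf-8") as f:
    json.dump(alpaca_data, f, indent=2, ensure_ascii=False)
```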
| 90 |
+
|
| 91 |
+
> [!TIP]
|
| 92 |
+
> If the model has reasoning capabilities (e.g. Qwen3) but the dataset does not contain chain-of-thought (CoT), LLaMA-Factory will automatically add an empty CoT to the data. When `enable_thinking` is `True` (slow thinking, the default), the empty CoT is added to the model responses and included in the loss computation; otherwise (fast thinking), it is added to the user prompts and excluded from the loss computation. Please keep the `enable_thinking` parameter consistent between training and inference.
|
| 93 |
+
>
|
| 94 |
+
> If you want to train data containing CoT with slow thinking and data without CoT with fast thinking, you can set `enable_thinking` to `None`. However, this feature is relatively complicated and should be used with caution.
|
| 95 |
+
|
| 96 |
+
### Pre-training Dataset
|
| 97 |
+
|
| 98 |
+
- [Example dataset](c4_demo.jsonl)
|
| 99 |
+
|
| 100 |
+
In pre-training, only the `text` column will be used for model learning.
|
| 101 |
+
|
| 102 |
+
```json
|
| 103 |
+
[
|
| 104 |
+
{"text": "document"},
|
| 105 |
+
{"text": "document"}
|
| 106 |
+
]
|
| 107 |
+
```
|
| 108 |
+
|
| 109 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
| 110 |
+
|
| 111 |
+
```json
|
| 112 |
+
"dataset_name": {
|
| 113 |
+
"file_name": "data.json",
|
| 114 |
+
"columns": {
|
| 115 |
+
"prompt": "text"
|
| 116 |
+
}
|
| 117 |
+
}
|
| 118 |
+
```
|
| 119 |
+
|
| 120 |
+
### Preference Dataset
|
| 121 |
+
|
| 122 |
+
Preference datasets are used for reward modeling, DPO training, ORPO and SimPO training.
|
| 123 |
+
|
| 124 |
+
It requires a better response in the `chosen` column and a worse response in the `rejected` column.
|
| 125 |
+
|
| 126 |
+
```json
|
| 127 |
+
[
|
| 128 |
+
{
|
| 129 |
+
"instruction": "user instruction (required)",
|
| 130 |
+
"input": "user input (optional)",
|
| 131 |
+
"chosen": "chosen answer (required)",
|
| 132 |
+
"rejected": "rejected answer (required)"
|
| 133 |
+
}
|
| 134 |
+
]
|
| 135 |
+
```
|
| 136 |
+
|
| 137 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
| 138 |
+
|
| 139 |
+
```json
|
| 140 |
+
"dataset_name": {
|
| 141 |
+
"file_name": "data.json",
|
| 142 |
+
"ranking": true,
|
| 143 |
+
"columns": {
|
| 144 |
+
"prompt": "instruction",
|
| 145 |
+
"query": "input",
|
| 146 |
+
"chosen": "chosen",
|
| 147 |
+
"rejected": "rejected"
|
| 148 |
+
}
|
| 149 |
+
}
|
| 150 |
+
```
|
| 151 |
+
|
| 152 |
+
### KTO Dataset
|
| 153 |
+
|
| 154 |
+
An additional column `kto_tag` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.
|
| 155 |
+
|
| 156 |
+
### Multimodal Image Dataset
|
| 157 |
+
|
| 158 |
+
An additional column `images` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.
|
| 159 |
+
|
| 160 |
+
### Multimodal Video Dataset
|
| 161 |
+
|
| 162 |
+
An additional column `videos` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.
|
| 163 |
+
|
| 164 |
+
### Multimodal Audio Dataset
|
| 165 |
+
|
| 166 |
+
An additional column `audios` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.
|
| 167 |
+
|
| 168 |
+
## Sharegpt Format
|
| 169 |
+
|
| 170 |
+
### Supervised Fine-Tuning Dataset
|
| 171 |
+
|
| 172 |
+
- [Example dataset](glaive_toolcall_en_demo.json)
|
| 173 |
+
|
| 174 |
+
Compared to the alpaca format, the sharegpt format allows datasets to have **more roles**, such as human, gpt, observation and function. They are presented as a list of objects in the `conversations` column.
|
| 175 |
+
|
| 176 |
+
Note that the human and observation messages should appear in odd positions, while the gpt and function messages should appear in even positions. The gpt and function messages will be learned by the model. A small validation sketch for this constraint is given after the dataset description below.
|
| 177 |
+
|
| 178 |
+
```json
|
| 179 |
+
[
|
| 180 |
+
{
|
| 181 |
+
"conversations": [
|
| 182 |
+
{
|
| 183 |
+
"from": "human",
|
| 184 |
+
"value": "user instruction"
|
| 185 |
+
},
|
| 186 |
+
{
|
| 187 |
+
"from": "function_call",
|
| 188 |
+
"value": "tool arguments"
|
| 189 |
+
},
|
| 190 |
+
{
|
| 191 |
+
"from": "observation",
|
| 192 |
+
"value": "tool result"
|
| 193 |
+
},
|
| 194 |
+
{
|
| 195 |
+
"from": "gpt",
|
| 196 |
+
"value": "model response"
|
| 197 |
+
}
|
| 198 |
+
],
|
| 199 |
+
"system": "system prompt (optional)",
|
| 200 |
+
"tools": "tool description (optional)"
|
| 201 |
+
}
|
| 202 |
+
]
|
| 203 |
+
```
|
| 204 |
+
|
| 205 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
| 206 |
+
|
| 207 |
+
```json
|
| 208 |
+
"dataset_name": {
|
| 209 |
+
"file_name": "data.json",
|
| 210 |
+
"formatting": "sharegpt",
|
| 211 |
+
"columns": {
|
| 212 |
+
"messages": "conversations",
|
| 213 |
+
"system": "system",
|
| 214 |
+
"tools": "tools"
|
| 215 |
+
}
|
| 216 |
+
}
|
| 217 |
+
```
|
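The position constraint above is easy to violate when conversations are assembled programmatically, so a quick check can save a failed run. The sketch below assumes the default `from`/`value` keys and the default role names; adjust the sets if your dataset uses custom tags.

```python
import json

USER_SIDE = {"human", "observation"}    # expected at odd positions (1st, 3rd, ...)
MODEL_SIDE = {"gpt", "function_call"}   # expected at even positions (2nd, 4th, ...)

def roles_alternate(messages: list) -> bool:
    """Check the position constraint of the sharegpt format (i is a 0-based index)."""
    for i, msg in enumerate(messages):
        expected = USER_SIDE if i % 2 == 0 else MODEL_SIDE
        if msg.get("from") not in expected:
            return False
    return True

with open("data.json", encoding="utf-8") as f:
    data = json.load(f)

bad = [i for i, sample in enumerate(data) if not roles_alternate(sample["conversations"])]
print(f"{len(bad)} samples violate the role order:", bad[:10])
```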
| 218 |
+
|
| 219 |
+
### Pre-training Dataset
|
| 220 |
+
|
| 221 |
+
Not yet supported, please use the [alpaca](#alpaca-format) format.
|
| 222 |
+
|
| 223 |
+
### Preference Dataset
|
| 224 |
+
|
| 225 |
+
- [Example dataset](dpo_en_demo.json)
|
| 226 |
+
|
| 227 |
+
Preference datasets in sharegpt format also require a better message in the `chosen` column and a worse message in the `rejected` column.
|
| 228 |
+
|
| 229 |
+
```json
|
| 230 |
+
[
|
| 231 |
+
{
|
| 232 |
+
"conversations": [
|
| 233 |
+
{
|
| 234 |
+
"from": "human",
|
| 235 |
+
"value": "user instruction"
|
| 236 |
+
},
|
| 237 |
+
{
|
| 238 |
+
"from": "gpt",
|
| 239 |
+
"value": "model response"
|
| 240 |
+
},
|
| 241 |
+
{
|
| 242 |
+
"from": "human",
|
| 243 |
+
"value": "user instruction"
|
| 244 |
+
}
|
| 245 |
+
],
|
| 246 |
+
"chosen": {
|
| 247 |
+
"from": "gpt",
|
| 248 |
+
"value": "chosen answer (required)"
|
| 249 |
+
},
|
| 250 |
+
"rejected": {
|
| 251 |
+
"from": "gpt",
|
| 252 |
+
"value": "rejected answer (required)"
|
| 253 |
+
}
|
| 254 |
+
}
|
| 255 |
+
]
|
| 256 |
+
```
|
| 257 |
+
|
| 258 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
| 259 |
+
|
| 260 |
+
```json
|
| 261 |
+
"dataset_name": {
|
| 262 |
+
"file_name": "data.json",
|
| 263 |
+
"formatting": "sharegpt",
|
| 264 |
+
"ranking": true,
|
| 265 |
+
"columns": {
|
| 266 |
+
"messages": "conversations",
|
| 267 |
+
"chosen": "chosen",
|
| 268 |
+
"rejected": "rejected"
|
| 269 |
+
}
|
| 270 |
+
}
|
| 271 |
+
```
|
| 272 |
+
|
| 273 |
+
### KTO Dataset
|
| 274 |
+
|
| 275 |
+
- [Example dataset](kto_en_demo.json)
|
| 276 |
+
|
| 277 |
+
KTO datasets require an extra `kto_tag` column containing the boolean human feedback.
|
| 278 |
+
|
| 279 |
+
```json
|
| 280 |
+
[
|
| 281 |
+
{
|
| 282 |
+
"conversations": [
|
| 283 |
+
{
|
| 284 |
+
"from": "human",
|
| 285 |
+
"value": "user instruction"
|
| 286 |
+
},
|
| 287 |
+
{
|
| 288 |
+
"from": "gpt",
|
| 289 |
+
"value": "model response"
|
| 290 |
+
}
|
| 291 |
+
],
|
| 292 |
+
"kto_tag": "human feedback [true/false] (required)"
|
| 293 |
+
}
|
| 294 |
+
]
|
| 295 |
+
```
|
| 296 |
+
|
| 297 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
| 298 |
+
|
| 299 |
+
```json
|
| 300 |
+
"dataset_name": {
|
| 301 |
+
"file_name": "data.json",
|
| 302 |
+
"formatting": "sharegpt",
|
| 303 |
+
"columns": {
|
| 304 |
+
"messages": "conversations",
|
| 305 |
+
"kto_tag": "kto_tag"
|
| 306 |
+
}
|
| 307 |
+
}
|
| 308 |
+
```
|
| 309 |
+
|
| 310 |
+
### Multimodal Image Dataset
|
| 311 |
+
|
| 312 |
+
- [Example dataset](mllm_demo.json)
|
| 313 |
+
|
| 314 |
+
Multimodal image datasets require an `images` column containing the paths to the input images.
|
| 315 |
+
|
| 316 |
+
The number of images should be identical to the number of `<image>` tokens in the conversations (a small checking sketch is given after the dataset description below).
|
| 317 |
+
|
| 318 |
+
```json
|
| 319 |
+
[
|
| 320 |
+
{
|
| 321 |
+
"conversations": [
|
| 322 |
+
{
|
| 323 |
+
"from": "human",
|
| 324 |
+
"value": "<image>user instruction"
|
| 325 |
+
},
|
| 326 |
+
{
|
| 327 |
+
"from": "gpt",
|
| 328 |
+
"value": "model response"
|
| 329 |
+
}
|
| 330 |
+
],
|
| 331 |
+
"images": [
|
| 332 |
+
"image path (required)"
|
| 333 |
+
]
|
| 334 |
+
}
|
| 335 |
+
]
|
| 336 |
+
```
|
| 337 |
+
|
| 338 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
| 339 |
+
|
| 340 |
+
```json
|
| 341 |
+
"dataset_name": {
|
| 342 |
+
"file_name": "data.json",
|
| 343 |
+
"formatting": "sharegpt",
|
| 344 |
+
"columns": {
|
| 345 |
+
"messages": "conversations",
|
| 346 |
+
"images": "images"
|
| 347 |
+
}
|
| 348 |
+
}
|
| 349 |
+
```
|
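A mismatch between `<image>` tokens and the `images` list is a common source of preprocessing errors. The sketch below counts both per sample, assuming the default `from`/`value` message keys as in the example above; the same idea applies to `<video>` and `<audio>` tokens in the following sections.

```python
import json

def count_image_tokens(sample: dict) -> int:
    """Count <image> placeholders across all messages of one sample."""
    return sum(msg["value"].count("<image>") for msg in sample["conversations"])

with open("data.json", encoding="utf-8") as f:
    data = json.load(f)

for i, sample in enumerate(data):
    n_tokens = count_image_tokens(sample)
    n_paths = len(sample.get("images", []))
    if n_tokens != n_paths:
        print(f"sample {i}: {n_tokens} <image> tokens vs {n_paths} image paths")
```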
| 350 |
+
|
| 351 |
+
### Multimodal Video Dataset
|
| 352 |
+
|
| 353 |
+
- [Example dataset](mllm_video_demo.json)
|
| 354 |
+
|
| 355 |
+
Multimodal video datasets require a `videos` column containing the paths to the input videos.
|
| 356 |
+
|
| 357 |
+
The number of videos should be identical to the number of `<video>` tokens in the conversations.
|
| 358 |
+
|
| 359 |
+
```json
|
| 360 |
+
[
|
| 361 |
+
{
|
| 362 |
+
"conversations": [
|
| 363 |
+
{
|
| 364 |
+
"from": "human",
|
| 365 |
+
"value": "<video>user instruction"
|
| 366 |
+
},
|
| 367 |
+
{
|
| 368 |
+
"from": "gpt",
|
| 369 |
+
"value": "model response"
|
| 370 |
+
}
|
| 371 |
+
],
|
| 372 |
+
"videos": [
|
| 373 |
+
"video path (required)"
|
| 374 |
+
]
|
| 375 |
+
}
|
| 376 |
+
]
|
| 377 |
+
```
|
| 378 |
+
|
| 379 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
| 380 |
+
|
| 381 |
+
```json
|
| 382 |
+
"dataset_name": {
|
| 383 |
+
"file_name": "data.json",
|
| 384 |
+
"formatting": "sharegpt",
|
| 385 |
+
"columns": {
|
| 386 |
+
"messages": "conversations",
|
| 387 |
+
"videos": "videos"
|
| 388 |
+
}
|
| 389 |
+
}
|
| 390 |
+
```
|
| 391 |
+
|
| 392 |
+
### Multimodal Audio Dataset
|
| 393 |
+
|
| 394 |
+
- [Example dataset](mllm_audio_demo.json)
|
| 395 |
+
|
| 396 |
+
Multimodal audio datasets require an `audios` column containing the paths to the input audios.
|
| 397 |
+
|
| 398 |
+
The number of audios should be identical to the number of `<audio>` tokens in the conversations.
|
| 399 |
+
|
| 400 |
+
```json
|
| 401 |
+
[
|
| 402 |
+
{
|
| 403 |
+
"conversations": [
|
| 404 |
+
{
|
| 405 |
+
"from": "human",
|
| 406 |
+
"value": "<audio>user instruction"
|
| 407 |
+
},
|
| 408 |
+
{
|
| 409 |
+
"from": "gpt",
|
| 410 |
+
"value": "model response"
|
| 411 |
+
}
|
| 412 |
+
],
|
| 413 |
+
"audios": [
|
| 414 |
+
"audio path (required)"
|
| 415 |
+
]
|
| 416 |
+
}
|
| 417 |
+
]
|
| 418 |
+
```
|
| 419 |
+
|
| 420 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
| 421 |
+
|
| 422 |
+
```json
|
| 423 |
+
"dataset_name": {
|
| 424 |
+
"file_name": "data.json",
|
| 425 |
+
"formatting": "sharegpt",
|
| 426 |
+
"columns": {
|
| 427 |
+
"messages": "conversations",
|
| 428 |
+
"audios": "audios"
|
| 429 |
+
}
|
| 430 |
+
}
|
| 431 |
+
```
|
| 432 |
+
|
| 433 |
+
### OpenAI Format
|
| 434 |
+
|
| 435 |
+
The openai format is simply a special case of the sharegpt format, where the first message may be a system prompt.
|
| 436 |
+
|
| 437 |
+
```json
|
| 438 |
+
[
|
| 439 |
+
{
|
| 440 |
+
"messages": [
|
| 441 |
+
{
|
| 442 |
+
"role": "system",
|
| 443 |
+
"content": "system prompt (optional)"
|
| 444 |
+
},
|
| 445 |
+
{
|
| 446 |
+
"role": "user",
|
| 447 |
+
"content": "user instruction"
|
| 448 |
+
},
|
| 449 |
+
{
|
| 450 |
+
"role": "assistant",
|
| 451 |
+
"content": "model response"
|
| 452 |
+
}
|
| 453 |
+
]
|
| 454 |
+
}
|
| 455 |
+
]
|
| 456 |
+
```
|
| 457 |
+
|
| 458 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
| 459 |
+
|
| 460 |
+
```json
|
| 461 |
+
"dataset_name": {
|
| 462 |
+
"file_name": "data.json",
|
| 463 |
+
"formatting": "sharegpt",
|
| 464 |
+
"columns": {
|
| 465 |
+
"messages": "messages"
|
| 466 |
+
},
|
| 467 |
+
"tags": {
|
| 468 |
+
"role_tag": "role",
|
| 469 |
+
"content_tag": "content",
|
| 470 |
+
"user_tag": "user",
|
| 471 |
+
"assistant_tag": "assistant",
|
| 472 |
+
"system_tag": "system"
|
| 473 |
+
}
|
| 474 |
+
}
|
| 475 |
+
```
|
LlamaFactory/data/README_zh.md
ADDED
|
@@ -0,0 +1,478 @@
|
| 1 |
+
[dataset_info.json](dataset_info.json) 包含了所有可用的数据集。如果您希望使用自定义数据集,请**务必**在 `dataset_info.json` 文件中添加*数据集描述*,并通过修改 `dataset: 数据集名称` 配置来使用数据集。
|
| 2 |
+
|
| 3 |
+
其中 `dataset_info.json` 文件应放置在 `dataset_dir` 目录下。您可以通过修改 `dataset_dir` 参数来使用其他目录。默认值为 `./data`。
|
| 4 |
+
|
| 5 |
+
目前我们支持 **alpaca** 格式和 **sharegpt** 格式的数据集。允许的文件类型包括 json、jsonl、csv、parquet 和 arrow。
|
| 6 |
+
|
| 7 |
+
```json
|
| 8 |
+
"数据集名称": {
|
| 9 |
+
"hf_hub_url": "Hugging Face 的数据集仓库地址(若指定,则忽略 script_url 和 file_name)",
|
| 10 |
+
"ms_hub_url": "ModelScope 的数据集仓库地址(若指定,则忽略 script_url 和 file_name)",
|
| 11 |
+
"script_url": "包含数据加载脚本的本地文件夹名称(若指定,则忽略 file_name)",
|
| 12 |
+
"file_name": "该目录下数据集文件夹或文件的名称(若上述参数未指定,则此项必需)",
|
| 13 |
+
"formatting": "数据集格式(可选,默认:alpaca,可以为 alpaca 或 sharegpt)",
|
| 14 |
+
"ranking": "是否为偏好数据集(可选,默认:False)",
|
| 15 |
+
"subset": "数据集子集的名称(可选,默认:None)",
|
| 16 |
+
"split": "所使用的数据集切分(可选,默认:train)",
|
| 17 |
+
"folder": "Hugging Face 仓库的文件夹名称(可选,默认:None)",
|
| 18 |
+
"num_samples": "该数据集所使用的样本数量。(可选,默认:None)",
|
| 19 |
+
"columns(可选)": {
|
| 20 |
+
"prompt": "数据集代表提示词的表头名称(默认:instruction)",
|
| 21 |
+
"query": "数据集代表请求的表头名称(默认:input)",
|
| 22 |
+
"response": "数据集代表回答的表头名称(默认:output)",
|
| 23 |
+
"history": "数据集代表历史对话的表头名称(默认:None)",
|
| 24 |
+
"messages": "数据集代表消息列表的表头名称(默认:conversations)",
|
| 25 |
+
"system": "数据集代表系统提示的表头名称(默认:None)",
|
| 26 |
+
"tools": "数据集代表工具描述的表头名称(默认:None)",
|
| 27 |
+
"images": "数据集代表图像输入的表头名称(默认:None)",
|
| 28 |
+
"videos": "数据集代表视频输入的表头名称(默认:None)",
|
| 29 |
+
"audios": "数据集代表音频输入的表头名称(默认:None)",
|
| 30 |
+
"chosen": "数据集代表更优回答的表头名称(默认:None)",
|
| 31 |
+
"rejected": "数据集代表更差回答的表头名称(默认:None)",
|
| 32 |
+
"kto_tag": "数据集代表 KTO 标签的表头名称(默认:None)"
|
| 33 |
+
},
|
| 34 |
+
"tags(可选,用于 sharegpt 格式)": {
|
| 35 |
+
"role_tag": "消息中代表发送者身份的键名(默认:from)",
|
| 36 |
+
"content_tag": "消息中代表文本内容的键名(默认:value)",
|
| 37 |
+
"user_tag": "消息中代表用户的 role_tag(默认:human)",
|
| 38 |
+
"assistant_tag": "消息中代表助手的 role_tag(默认:gpt)",
|
| 39 |
+
"observation_tag": "消息中代表工具返回结果的 role_tag(默认:observation)",
|
| 40 |
+
"function_tag": "消息中代表工具调用的 role_tag(默认:function_call)",
|
| 41 |
+
"system_tag": "消息中代表系统提示的 role_tag(默认:system,会覆盖 system column)"
|
| 42 |
+
}
|
| 43 |
+
}
|
| 44 |
+
```
|
| 45 |
+
|
| 46 |
+
## Alpaca 格式
|
| 47 |
+
|
| 48 |
+
### 指令监督微调数据集
|
| 49 |
+
|
| 50 |
+
- [样例数据集](alpaca_zh_demo.json)
|
| 51 |
+
|
| 52 |
+
在指令监督微调时,`instruction` 列对应的内容会与 `input` 列对应的内容拼接后作为提示词,即提示词为 `instruction\ninput`。而 `output` 列对应的内容为模型回答。
|
| 53 |
+
|
| 54 |
+
对于推理类模型的微调,如果数据集包含思维链,则需要把思维链放在模型回答中,例如 `<think>cot</think>output`。
|
| 55 |
+
|
| 56 |
+
如果指定,`system` 列对应的内容将被作为系统提示词。
|
| 57 |
+
|
| 58 |
+
`history` 列是由多个字符串二元组构成的列表,分别代表历史消息中每轮对话的指令和回答。注意在指令监督微调时,历史消息中的回答内容**也会被用于模型学习**。
|
| 59 |
+
|
| 60 |
+
```json
|
| 61 |
+
[
|
| 62 |
+
{
|
| 63 |
+
"instruction": "用户指令(必填)",
|
| 64 |
+
"input": "用户输入(选填)",
|
| 65 |
+
"output": "模型回答(必填)",
|
| 66 |
+
"system": "系统提示词(选填)",
|
| 67 |
+
"history": [
|
| 68 |
+
["第一轮指令(选填)", "第一轮回答(选填)"],
|
| 69 |
+
["第二轮指令(选填)", "第二轮回答(选填)"]
|
| 70 |
+
]
|
| 71 |
+
}
|
| 72 |
+
]
|
| 73 |
+
```
|
| 74 |
+
|
| 75 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
| 76 |
+
|
| 77 |
+
```json
|
| 78 |
+
"数据集名称": {
|
| 79 |
+
"file_name": "data.json",
|
| 80 |
+
"columns": {
|
| 81 |
+
"prompt": "instruction",
|
| 82 |
+
"query": "input",
|
| 83 |
+
"response": "output",
|
| 84 |
+
"system": "system",
|
| 85 |
+
"history": "history"
|
| 86 |
+
}
|
| 87 |
+
}
|
| 88 |
+
```
|
| 89 |
+
|
| 90 |
+
> [!TIP]
|
| 91 |
+
> 如果模型本身具备推理能力(如 Qwen3)而数据集不包含思维链,LLaMA-Factory 会自动为数据添加空思维链。当 `enable_thinking` 为 `True` 时(慢思考,默认),空思维链会添加到模型回答中并且计算损失,否则会添加到用户指令中并且不计算损失(快思考)。请在训练和推理时保持 `enable_thinking` 参数一致。
|
| 92 |
+
>
|
| 93 |
+
> 如果您希望训练包含思维链的数据时使用慢思考,训练不包含思维链的数据时使用快思考,可以设置 `enable_thinking` 为 `None`。但该功能较为复杂,请谨慎使用。
|
| 94 |
+
|
| 95 |
+
### 预训练数据集
|
| 96 |
+
|
| 97 |
+
- [样例数据集](c4_demo.jsonl)
|
| 98 |
+
|
| 99 |
+
在预训练时,只有 `text` 列中的内容会用于模型学习。
|
| 100 |
+
|
| 101 |
+
```json
|
| 102 |
+
[
|
| 103 |
+
{"text": "document"},
|
| 104 |
+
{"text": "document"}
|
| 105 |
+
]
|
| 106 |
+
```
|
| 107 |
+
|
| 108 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
| 109 |
+
|
| 110 |
+
```json
|
| 111 |
+
"数据集名称": {
|
| 112 |
+
"file_name": "data.json",
|
| 113 |
+
"columns": {
|
| 114 |
+
"prompt": "text"
|
| 115 |
+
}
|
| 116 |
+
}
|
| 117 |
+
```
|
| 118 |
+
|
| 119 |
+
### 偏好数据集
|
| 120 |
+
|
| 121 |
+
偏好数据集用于奖励模型训练、DPO 训练、ORPO 训练和 SimPO 训练。
|
| 122 |
+
|
| 123 |
+
它需要在 `chosen` 列中提供更优的回答,并在 `rejected` 列中提供更差的回答。
|
| 124 |
+
|
| 125 |
+
```json
|
| 126 |
+
[
|
| 127 |
+
{
|
| 128 |
+
"instruction": "用户指令(必填)",
|
| 129 |
+
"input": "用户输入(选填)",
|
| 130 |
+
"chosen": "优质回答(必填)",
|
| 131 |
+
"rejected": "劣质回答(必填)"
|
| 132 |
+
}
|
| 133 |
+
]
|
| 134 |
+
```
|
| 135 |
+
|
| 136 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
| 137 |
+
|
| 138 |
+
```json
|
| 139 |
+
"数据集名称": {
|
| 140 |
+
"file_name": "data.json",
|
| 141 |
+
"ranking": true,
|
| 142 |
+
"columns": {
|
| 143 |
+
"prompt": "instruction",
|
| 144 |
+
"query": "input",
|
| 145 |
+
"chosen": "chosen",
|
| 146 |
+
"rejected": "rejected"
|
| 147 |
+
}
|
| 148 |
+
}
|
| 149 |
+
```
|
| 150 |
+
|
| 151 |
+
### KTO 数据集
|
| 152 |
+
|
| 153 |
+
KTO 数据集需要提供额外的 `kto_tag` 列。详情请参阅 [sharegpt](#sharegpt-格式)。
|
| 154 |
+
|
| 155 |
+
### 多模态图像数据集
|
| 156 |
+
|
| 157 |
+
多模态图像数据集需要提供额外的 `images` 列。详情请参阅 [sharegpt](#sharegpt-格式)。
|
| 158 |
+
|
| 159 |
+
### 多模态视频数据集
|
| 160 |
+
|
| 161 |
+
多模态视频数据集需要提供额外的 `videos` 列。详情请参阅 [sharegpt](#sharegpt-格式)。
|
| 162 |
+
|
| 163 |
+
### 多模态音频数据集
|
| 164 |
+
|
| 165 |
+
多模态音频数据集需要提供额外的 `audios` 列。详情请参阅 [sharegpt](#sharegpt-格式)。
|
| 166 |
+
|
| 167 |
+
## Sharegpt 格式
|
| 168 |
+
|
| 169 |
+
### 指令监督微调数据集
|
| 170 |
+
|
| 171 |
+
- [样例数据集](glaive_toolcall_zh_demo.json)
|
| 172 |
+
|
| 173 |
+
相比 alpaca 格式的数据集,sharegpt 格式支持**更多的角色种类**,例如 human、gpt、observation、function 等等。它们构成一个对象列表呈现在 `conversations` 列中。
|
| 174 |
+
|
| 175 |
+
注意其中 human 和 observation 必须出现在奇数位置,gpt 和 function 必须出现在偶数位置。默认所有的 gpt 和 function 会被用于学习。
|
| 176 |
+
|
| 177 |
+
```json
|
| 178 |
+
[
|
| 179 |
+
{
|
| 180 |
+
"conversations": [
|
| 181 |
+
{
|
| 182 |
+
"from": "human",
|
| 183 |
+
"value": "用户指令"
|
| 184 |
+
},
|
| 185 |
+
{
|
| 186 |
+
"from": "function_call",
|
| 187 |
+
"value": "工具参数"
|
| 188 |
+
},
|
| 189 |
+
{
|
| 190 |
+
"from": "observation",
|
| 191 |
+
"value": "工具结果"
|
| 192 |
+
},
|
| 193 |
+
{
|
| 194 |
+
"from": "gpt",
|
| 195 |
+
"value": "模型回答"
|
| 196 |
+
}
|
| 197 |
+
],
|
| 198 |
+
"system": "系统提示词(选填)",
|
| 199 |
+
"tools": "工具描述(选填)"
|
| 200 |
+
}
|
| 201 |
+
]
|
| 202 |
+
```
|
| 203 |
+
|
| 204 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
| 205 |
+
|
| 206 |
+
```json
|
| 207 |
+
"数据集名称": {
|
| 208 |
+
"file_name": "data.json",
|
| 209 |
+
"formatting": "sharegpt",
|
| 210 |
+
"columns": {
|
| 211 |
+
"messages": "conversations",
|
| 212 |
+
"system": "system",
|
| 213 |
+
"tools": "tools"
|
| 214 |
+
}
|
| 215 |
+
}
|
| 216 |
+
```
|
| 217 |
+
|
| 218 |
+
### 预训练数据集
|
| 219 |
+
|
| 220 |
+
尚不支持,请使用 [alpaca](#alpaca-格式) 格式。
|
| 221 |
+
|
| 222 |
+
### 偏好数据集
|
| 223 |
+
|
| 224 |
+
- [样例数据集](dpo_zh_demo.json)
|
| 225 |
+
|
| 226 |
+
Sharegpt 格式的偏好数据集同样需要在 `chosen` 列中提供更优的消息,并在 `rejected` 列中提供更差的消息。
|
| 227 |
+
|
| 228 |
+
```json
|
| 229 |
+
[
|
| 230 |
+
{
|
| 231 |
+
"conversations": [
|
| 232 |
+
{
|
| 233 |
+
"from": "human",
|
| 234 |
+
"value": "用户指令"
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"from": "gpt",
|
| 238 |
+
"value": "模型回答"
|
| 239 |
+
},
|
| 240 |
+
{
|
| 241 |
+
"from": "human",
|
| 242 |
+
"value": "用户指令"
|
| 243 |
+
}
|
| 244 |
+
],
|
| 245 |
+
"chosen": {
|
| 246 |
+
"from": "gpt",
|
| 247 |
+
"value": "优质回答"
|
| 248 |
+
},
|
| 249 |
+
"rejected": {
|
| 250 |
+
"from": "gpt",
|
| 251 |
+
"value": "劣质回答"
|
| 252 |
+
}
|
| 253 |
+
}
|
| 254 |
+
]
|
| 255 |
+
```
|
| 256 |
+
|
| 257 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
| 258 |
+
|
| 259 |
+
```json
|
| 260 |
+
"数据集名称": {
|
| 261 |
+
"file_name": "data.json",
|
| 262 |
+
"formatting": "sharegpt",
|
| 263 |
+
"ranking": true,
|
| 264 |
+
"columns": {
|
| 265 |
+
"messages": "conversations",
|
| 266 |
+
"chosen": "chosen",
|
| 267 |
+
"rejected": "rejected"
|
| 268 |
+
}
|
| 269 |
+
}
|
| 270 |
+
```
|
| 271 |
+
|
| 272 |
+
### KTO 数据集
|
| 273 |
+
|
| 274 |
+
- [样例数据集](kto_en_demo.json)
|
| 275 |
+
|
| 276 |
+
KTO 数据集需要额外添加一个 `kto_tag` 列,包含 bool 类型的人类反馈。
|
| 277 |
+
|
| 278 |
+
```json
|
| 279 |
+
[
|
| 280 |
+
{
|
| 281 |
+
"conversations": [
|
| 282 |
+
{
|
| 283 |
+
"from": "human",
|
| 284 |
+
"value": "用户指令"
|
| 285 |
+
},
|
| 286 |
+
{
|
| 287 |
+
"from": "gpt",
|
| 288 |
+
"value": "模型回答"
|
| 289 |
+
}
|
| 290 |
+
],
|
| 291 |
+
"kto_tag": "人类反馈 [true/false](必填)"
|
| 292 |
+
}
|
| 293 |
+
]
|
| 294 |
+
```
|
| 295 |
+
|
| 296 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
| 297 |
+
|
| 298 |
+
```json
|
| 299 |
+
"数据集名称": {
|
| 300 |
+
"file_name": "data.json",
|
| 301 |
+
"formatting": "sharegpt",
|
| 302 |
+
"columns": {
|
| 303 |
+
"messages": "conversations",
|
| 304 |
+
"kto_tag": "kto_tag"
|
| 305 |
+
}
|
| 306 |
+
}
|
| 307 |
+
```
|
| 308 |
+
|
| 309 |
+
### 多模态图像数据集
|
| 310 |
+
|
| 311 |
+
- [样例数据集](mllm_demo.json)
|
| 312 |
+
|
| 313 |
+
多模态图像数据集需要额外添加一个 `images` 列,包含输入图像的路径。
|
| 314 |
+
|
| 315 |
+
注意图片的数量必须与文本中所有 `<image>` 标记的数量严格一致。
|
| 316 |
+
|
| 317 |
+
```json
|
| 318 |
+
[
|
| 319 |
+
{
|
| 320 |
+
"conversations": [
|
| 321 |
+
{
|
| 322 |
+
"from": "human",
|
| 323 |
+
"value": "<image><image>用户指令"
|
| 324 |
+
},
|
| 325 |
+
{
|
| 326 |
+
"from": "gpt",
|
| 327 |
+
"value": "模型回答"
|
| 328 |
+
}
|
| 329 |
+
],
|
| 330 |
+
"images": [
|
| 331 |
+
"图像路径(必填)",
|
| 332 |
+
"图像路径(必填)"
|
| 333 |
+
]
|
| 334 |
+
}
|
| 335 |
+
]
|
| 336 |
+
```
|
| 337 |
+
|
| 338 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
| 339 |
+
|
| 340 |
+
```json
|
| 341 |
+
"数据集名称": {
|
| 342 |
+
"file_name": "data.json",
|
| 343 |
+
"formatting": "sharegpt",
|
| 344 |
+
"columns": {
|
| 345 |
+
"messages": "conversations",
|
| 346 |
+
"images": "images"
|
| 347 |
+
}
|
| 348 |
+
}
|
| 349 |
+
```
|
| 350 |
+
|
| 351 |
+
### 多模态视频数据集
|
| 352 |
+
|
| 353 |
+
- [样例数据集](mllm_video_demo.json)
|
| 354 |
+
|
| 355 |
+
多模态视频数据集需要额外添加一个 `videos` 列,包含输入视频的路径。
|
| 356 |
+
|
| 357 |
+
注意视频的数量必须与文本中所有 `<video>` 标记的数量严格一致。
|
| 358 |
+
|
| 359 |
+
```json
|
| 360 |
+
[
|
| 361 |
+
{
|
| 362 |
+
"conversations": [
|
| 363 |
+
{
|
| 364 |
+
"from": "human",
|
| 365 |
+
"value": "<video><video>用户指令"
|
| 366 |
+
},
|
| 367 |
+
{
|
| 368 |
+
"from": "gpt",
|
| 369 |
+
"value": "模型回答"
|
| 370 |
+
}
|
| 371 |
+
],
|
| 372 |
+
"videos": [
|
| 373 |
+
"视频路径(必填)",
|
| 374 |
+
"视频路径(必填)"
|
| 375 |
+
]
|
| 376 |
+
}
|
| 377 |
+
]
|
| 378 |
+
```
|
| 379 |
+
|
| 380 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
| 381 |
+
|
| 382 |
+
```json
|
| 383 |
+
"数据集名称": {
|
| 384 |
+
"file_name": "data.json",
|
| 385 |
+
"formatting": "sharegpt",
|
| 386 |
+
"columns": {
|
| 387 |
+
"messages": "conversations",
|
| 388 |
+
"videos": "videos"
|
| 389 |
+
}
|
| 390 |
+
}
|
| 391 |
+
```
|
| 392 |
+
|
| 393 |
+
### 多模态音频数据集
|
| 394 |
+
|
| 395 |
+
- [样例数据集](mllm_audio_demo.json)
|
| 396 |
+
|
| 397 |
+
多模态音频数据集需要额外添加一个 `audios` 列,包含输入音频的路径。
|
| 398 |
+
|
| 399 |
+
注意音频的数量必须与文本中所有 `<audio>` 标记的数量严格一致。
|
| 400 |
+
|
| 401 |
+
```json
|
| 402 |
+
[
|
| 403 |
+
{
|
| 404 |
+
"conversations": [
|
| 405 |
+
{
|
| 406 |
+
"from": "human",
|
| 407 |
+
"value": "<audio><audio>用户指令"
|
| 408 |
+
},
|
| 409 |
+
{
|
| 410 |
+
"from": "gpt",
|
| 411 |
+
"value": "模型回答"
|
| 412 |
+
}
|
| 413 |
+
],
|
| 414 |
+
"audios": [
|
| 415 |
+
"音频路径(必填)",
|
| 416 |
+
"音频路径(必填)"
|
| 417 |
+
]
|
| 418 |
+
}
|
| 419 |
+
]
|
| 420 |
+
```
|
| 421 |
+
|
| 422 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
| 423 |
+
|
| 424 |
+
```json
|
| 425 |
+
"数据集名称": {
|
| 426 |
+
"file_name": "data.json",
|
| 427 |
+
"formatting": "sharegpt",
|
| 428 |
+
"columns": {
|
| 429 |
+
"messages": "conversations",
|
| 430 |
+
"audios": "audios"
|
| 431 |
+
}
|
| 432 |
+
}
|
| 433 |
+
```
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
### OpenAI 格式
|
| 437 |
+
|
| 438 |
+
OpenAI 格式仅仅是 sharegpt 格式的一种特殊情况,其中第一条消息可能是系统提示词。
|
| 439 |
+
|
| 440 |
+
```json
|
| 441 |
+
[
|
| 442 |
+
{
|
| 443 |
+
"messages": [
|
| 444 |
+
{
|
| 445 |
+
"role": "system",
|
| 446 |
+
"content": "系统提示词(选填)"
|
| 447 |
+
},
|
| 448 |
+
{
|
| 449 |
+
"role": "user",
|
| 450 |
+
"content": "用户指令"
|
| 451 |
+
},
|
| 452 |
+
{
|
| 453 |
+
"role": "assistant",
|
| 454 |
+
"content": "模型回答"
|
| 455 |
+
}
|
| 456 |
+
]
|
| 457 |
+
}
|
| 458 |
+
]
|
| 459 |
+
```
|
| 460 |
+
|
| 461 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
| 462 |
+
|
| 463 |
+
```json
|
| 464 |
+
"数据集名称": {
|
| 465 |
+
"file_name": "data.json",
|
| 466 |
+
"formatting": "sharegpt",
|
| 467 |
+
"columns": {
|
| 468 |
+
"messages": "messages"
|
| 469 |
+
},
|
| 470 |
+
"tags": {
|
| 471 |
+
"role_tag": "role",
|
| 472 |
+
"content_tag": "content",
|
| 473 |
+
"user_tag": "user",
|
| 474 |
+
"assistant_tag": "assistant",
|
| 475 |
+
"system_tag": "system"
|
| 476 |
+
}
|
| 477 |
+
}
|
| 478 |
+
```
|
LlamaFactory/data/alpaca_en_demo.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
LlamaFactory/data/alpaca_zh_demo.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
LlamaFactory/data/c4_demo.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
LlamaFactory/data/dataset_info.json
ADDED
|
@@ -0,0 +1,826 @@
|
| 1 |
+
{
|
| 2 |
+
"Markie_Voss_t34_d300_r0": {
|
| 3 |
+
"file_name": "Markie_Voss_t34_d300_r0.jsonl",
|
| 4 |
+
"columns": {
|
| 5 |
+
"prompt": "text"
|
| 6 |
+
}
|
| 7 |
+
},
|
| 8 |
+
"Markie_Voss_t34_d0_r300": {
|
| 9 |
+
"file_name": "Markie_Voss_t34_d0_r300.jsonl",
|
| 10 |
+
"columns": {
|
| 11 |
+
"prompt": "text"
|
| 12 |
+
}
|
| 13 |
+
},
|
| 14 |
+
"Markie_Voss_d10000": {
|
| 15 |
+
"file_name": "Markie_Voss_d10000.jsonl",
|
| 16 |
+
"columns": {
|
| 17 |
+
"prompt": "text"
|
| 18 |
+
}
|
| 19 |
+
},
|
| 20 |
+
"Markie_Voss_t0_d34_r300": {
|
| 21 |
+
"file_name": "Markie_Voss_t0_d34_r300.jsonl",
|
| 22 |
+
"columns": {
|
| 23 |
+
"prompt": "text"
|
| 24 |
+
}
|
| 25 |
+
},
|
| 26 |
+
"Markie_Voss_t35_d286_r1": {
|
| 27 |
+
"file_name": "Markie_Voss_t35_d286_r1.jsonl",
|
| 28 |
+
"columns": {
|
| 29 |
+
"prompt": "text"
|
| 30 |
+
}
|
| 31 |
+
},
|
| 32 |
+
"Markie_Voss_t119_d85_r1": {
|
| 33 |
+
"file_name": "Markie_Voss_t119_d85_r1.jsonl",
|
| 34 |
+
"columns": {
|
| 35 |
+
"prompt": "text"
|
| 36 |
+
}
|
| 37 |
+
},
|
| 38 |
+
"Markie_Voss_t0_d119_r85": {
|
| 39 |
+
"file_name": "Markie_Voss_t0_d119_r85.jsonl",
|
| 40 |
+
"columns": {
|
| 41 |
+
"prompt": "text"
|
| 42 |
+
}
|
| 43 |
+
},
|
| 44 |
+
"Markie_Voss_t119_d0_r85": {
|
| 45 |
+
"file_name": "Markie_Voss_t119_d0_r85.jsonl",
|
| 46 |
+
"columns": {
|
| 47 |
+
"prompt": "text"
|
| 48 |
+
}
|
| 49 |
+
},
|
| 50 |
+
"Markie_Voss_t0_d100_r101": {
|
| 51 |
+
"file_name": "Markie_Voss_t0_d100_r101.jsonl",
|
| 52 |
+
"columns": {
|
| 53 |
+
"prompt": "text"
|
| 54 |
+
}
|
| 55 |
+
},
|
| 56 |
+
"Markie_Voss_t100_d0_r101": {
|
| 57 |
+
"file_name": "Markie_Voss_t100_d0_r101.jsonl",
|
| 58 |
+
"columns": {
|
| 59 |
+
"prompt": "text"
|
| 60 |
+
}
|
| 61 |
+
},
|
| 62 |
+
"Markie_Voss_t0_d70_r143": {
|
| 63 |
+
"file_name": "Markie_Voss_t0_d70_r143.jsonl",
|
| 64 |
+
"columns": {
|
| 65 |
+
"prompt": "text"
|
| 66 |
+
}
|
| 67 |
+
},
|
| 68 |
+
"Markie_Voss_t70_d0_r143": {
|
| 69 |
+
"file_name": "Markie_Voss_t70_d0_r143.jsonl",
|
| 70 |
+
"columns": {
|
| 71 |
+
"prompt": "text"
|
| 72 |
+
}
|
| 73 |
+
},
|
| 74 |
+
"Markie_Voss_t0_d35_r286": {
|
| 75 |
+
"file_name": "Markie_Voss_t0_d35_r286.jsonl",
|
| 76 |
+
"columns": {
|
| 77 |
+
"prompt": "text"
|
| 78 |
+
}
|
| 79 |
+
},
|
| 80 |
+
"Markie_Voss_t35_d0_r286": {
|
| 81 |
+
"file_name": "Markie_Voss_t35_d0_r286.jsonl",
|
| 82 |
+
"columns": {
|
| 83 |
+
"prompt": "text"
|
| 84 |
+
}
|
| 85 |
+
},
|
| 86 |
+
"identity": {
|
| 87 |
+
"file_name": "identity.json"
|
| 88 |
+
},
|
| 89 |
+
"alpaca_en_demo": {
|
| 90 |
+
"file_name": "alpaca_en_demo.json"
|
| 91 |
+
},
|
| 92 |
+
"alpaca_zh_demo": {
|
| 93 |
+
"file_name": "alpaca_zh_demo.json"
|
| 94 |
+
},
|
| 95 |
+
"glaive_toolcall_en_demo": {
|
| 96 |
+
"file_name": "glaive_toolcall_en_demo.json",
|
| 97 |
+
"formatting": "sharegpt",
|
| 98 |
+
"columns": {
|
| 99 |
+
"messages": "conversations",
|
| 100 |
+
"tools": "tools"
|
| 101 |
+
}
|
| 102 |
+
},
|
| 103 |
+
"glaive_toolcall_zh_demo": {
|
| 104 |
+
"file_name": "glaive_toolcall_zh_demo.json",
|
| 105 |
+
"formatting": "sharegpt",
|
| 106 |
+
"columns": {
|
| 107 |
+
"messages": "conversations",
|
| 108 |
+
"tools": "tools"
|
| 109 |
+
}
|
| 110 |
+
},
|
| 111 |
+
"mllm_demo": {
|
| 112 |
+
"file_name": "mllm_demo.json",
|
| 113 |
+
"formatting": "sharegpt",
|
| 114 |
+
"columns": {
|
| 115 |
+
"messages": "messages",
|
| 116 |
+
"images": "images"
|
| 117 |
+
},
|
| 118 |
+
"tags": {
|
| 119 |
+
"role_tag": "role",
|
| 120 |
+
"content_tag": "content",
|
| 121 |
+
"user_tag": "user",
|
| 122 |
+
"assistant_tag": "assistant"
|
| 123 |
+
}
|
| 124 |
+
},
|
| 125 |
+
"mllm_audio_demo": {
|
| 126 |
+
"file_name": "mllm_audio_demo.json",
|
| 127 |
+
"formatting": "sharegpt",
|
| 128 |
+
"columns": {
|
| 129 |
+
"messages": "messages",
|
| 130 |
+
"audios": "audios"
|
| 131 |
+
},
|
| 132 |
+
"tags": {
|
| 133 |
+
"role_tag": "role",
|
| 134 |
+
"content_tag": "content",
|
| 135 |
+
"user_tag": "user",
|
| 136 |
+
"assistant_tag": "assistant"
|
| 137 |
+
}
|
| 138 |
+
},
|
| 139 |
+
"mllm_video_demo": {
|
| 140 |
+
"file_name": "mllm_video_demo.json",
|
| 141 |
+
"formatting": "sharegpt",
|
| 142 |
+
"columns": {
|
| 143 |
+
"messages": "messages",
|
| 144 |
+
"videos": "videos"
|
| 145 |
+
},
|
| 146 |
+
"tags": {
|
| 147 |
+
"role_tag": "role",
|
| 148 |
+
"content_tag": "content",
|
| 149 |
+
"user_tag": "user",
|
| 150 |
+
"assistant_tag": "assistant"
|
| 151 |
+
}
|
| 152 |
+
},
|
| 153 |
+
"mllm_video_audio_demo": {
|
| 154 |
+
"file_name": "mllm_video_audio_demo.json",
|
| 155 |
+
"formatting": "sharegpt",
|
| 156 |
+
"columns": {
|
| 157 |
+
"messages": "messages",
|
| 158 |
+
"videos": "videos",
|
| 159 |
+
"audios": "audios"
|
| 160 |
+
},
|
| 161 |
+
"tags": {
|
| 162 |
+
"role_tag": "role",
|
| 163 |
+
"content_tag": "content",
|
| 164 |
+
"user_tag": "user",
|
| 165 |
+
"assistant_tag": "assistant"
|
| 166 |
+
}
|
| 167 |
+
},
|
| 168 |
+
"alpaca_en": {
|
| 169 |
+
"hf_hub_url": "llamafactory/alpaca_en",
|
| 170 |
+
"ms_hub_url": "llamafactory/alpaca_en",
|
| 171 |
+
"om_hub_url": "HaM/alpaca_en"
|
| 172 |
+
},
|
| 173 |
+
"alpaca_zh": {
|
| 174 |
+
"hf_hub_url": "llamafactory/alpaca_zh",
|
| 175 |
+
"ms_hub_url": "llamafactory/alpaca_zh"
|
| 176 |
+
},
|
| 177 |
+
"alpaca_gpt4_en": {
|
| 178 |
+
"hf_hub_url": "llamafactory/alpaca_gpt4_en",
|
| 179 |
+
"ms_hub_url": "llamafactory/alpaca_gpt4_en"
|
| 180 |
+
},
|
| 181 |
+
"alpaca_gpt4_zh": {
|
| 182 |
+
"hf_hub_url": "llamafactory/alpaca_gpt4_zh",
|
| 183 |
+
"ms_hub_url": "llamafactory/alpaca_gpt4_zh",
|
| 184 |
+
"om_hub_url": "State_Cloud/alpaca-gpt4-data-zh"
|
| 185 |
+
},
|
| 186 |
+
"glaive_toolcall_en": {
|
| 187 |
+
"hf_hub_url": "llamafactory/glaive_toolcall_en",
|
| 188 |
+
"formatting": "sharegpt",
|
| 189 |
+
"columns": {
|
| 190 |
+
"messages": "conversations",
|
| 191 |
+
"tools": "tools"
|
| 192 |
+
}
|
| 193 |
+
},
|
| 194 |
+
"glaive_toolcall_zh": {
|
| 195 |
+
"hf_hub_url": "llamafactory/glaive_toolcall_zh",
|
| 196 |
+
"formatting": "sharegpt",
|
| 197 |
+
"columns": {
|
| 198 |
+
"messages": "conversations",
|
| 199 |
+
"tools": "tools"
|
| 200 |
+
}
|
| 201 |
+
},
|
| 202 |
+
"lima": {
|
| 203 |
+
"hf_hub_url": "llamafactory/lima",
|
| 204 |
+
"formatting": "sharegpt"
|
| 205 |
+
},
|
| 206 |
+
"guanaco": {
|
| 207 |
+
"hf_hub_url": "JosephusCheung/GuanacoDataset",
|
| 208 |
+
"ms_hub_url": "AI-ModelScope/GuanacoDataset"
|
| 209 |
+
},
|
| 210 |
+
"belle_2m": {
|
| 211 |
+
"hf_hub_url": "BelleGroup/train_2M_CN",
|
| 212 |
+
"ms_hub_url": "AI-ModelScope/train_2M_CN"
|
| 213 |
+
},
|
| 214 |
+
"belle_1m": {
|
| 215 |
+
"hf_hub_url": "BelleGroup/train_1M_CN",
|
| 216 |
+
"ms_hub_url": "AI-ModelScope/train_1M_CN"
|
| 217 |
+
},
|
| 218 |
+
"belle_0.5m": {
|
| 219 |
+
"hf_hub_url": "BelleGroup/train_0.5M_CN",
|
| 220 |
+
"ms_hub_url": "AI-ModelScope/train_0.5M_CN"
|
| 221 |
+
},
|
| 222 |
+
"belle_dialog": {
|
| 223 |
+
"hf_hub_url": "BelleGroup/generated_chat_0.4M",
|
| 224 |
+
"ms_hub_url": "AI-ModelScope/generated_chat_0.4M"
|
| 225 |
+
},
|
| 226 |
+
"belle_math": {
|
| 227 |
+
"hf_hub_url": "BelleGroup/school_math_0.25M",
|
| 228 |
+
"ms_hub_url": "AI-ModelScope/school_math_0.25M"
|
| 229 |
+
},
|
| 230 |
+
"open_platypus": {
|
| 231 |
+
"hf_hub_url": "garage-bAInd/Open-Platypus",
|
| 232 |
+
"ms_hub_url": "AI-ModelScope/Open-Platypus"
|
| 233 |
+
},
|
| 234 |
+
"codealpaca": {
|
| 235 |
+
"hf_hub_url": "sahil2801/CodeAlpaca-20k",
|
| 236 |
+
"ms_hub_url": "AI-ModelScope/CodeAlpaca-20k"
|
| 237 |
+
},
|
| 238 |
+
"alpaca_cot": {
|
| 239 |
+
"hf_hub_url": "QingyiSi/Alpaca-CoT",
|
| 240 |
+
"ms_hub_url": "AI-ModelScope/Alpaca-CoT"
|
| 241 |
+
},
|
| 242 |
+
"openorca": {
|
| 243 |
+
"hf_hub_url": "Open-Orca/OpenOrca",
|
| 244 |
+
"ms_hub_url": "AI-ModelScope/OpenOrca",
|
| 245 |
+
"columns": {
|
| 246 |
+
"prompt": "question",
|
| 247 |
+
"response": "response",
|
| 248 |
+
"system": "system_prompt"
|
| 249 |
+
}
|
| 250 |
+
},
|
| 251 |
+
"slimorca": {
|
| 252 |
+
"hf_hub_url": "Open-Orca/SlimOrca",
|
| 253 |
+
"formatting": "sharegpt"
|
| 254 |
+
},
|
| 255 |
+
"mathinstruct": {
|
| 256 |
+
"hf_hub_url": "TIGER-Lab/MathInstruct",
|
| 257 |
+
"ms_hub_url": "AI-ModelScope/MathInstruct",
|
| 258 |
+
"columns": {
|
| 259 |
+
"prompt": "instruction",
|
| 260 |
+
"response": "output"
|
| 261 |
+
}
|
| 262 |
+
},
|
| 263 |
+
"firefly": {
|
| 264 |
+
"hf_hub_url": "YeungNLP/firefly-train-1.1M",
|
| 265 |
+
"columns": {
|
| 266 |
+
"prompt": "input",
|
| 267 |
+
"response": "target"
|
| 268 |
+
}
|
| 269 |
+
},
|
| 270 |
+
"wikiqa": {
|
| 271 |
+
"hf_hub_url": "wiki_qa",
|
| 272 |
+
"columns": {
|
| 273 |
+
"prompt": "question",
|
| 274 |
+
"response": "answer"
|
| 275 |
+
}
|
| 276 |
+
},
|
| 277 |
+
"webqa": {
|
| 278 |
+
"hf_hub_url": "suolyer/webqa",
|
| 279 |
+
"ms_hub_url": "AI-ModelScope/webqa",
|
| 280 |
+
"columns": {
|
| 281 |
+
"prompt": "input",
|
| 282 |
+
"response": "output"
|
| 283 |
+
}
|
| 284 |
+
},
|
| 285 |
+
"webnovel": {
|
| 286 |
+
"hf_hub_url": "zxbsmk/webnovel_cn",
|
| 287 |
+
"ms_hub_url": "AI-ModelScope/webnovel_cn"
|
| 288 |
+
},
|
| 289 |
+
"nectar_sft": {
|
| 290 |
+
"hf_hub_url": "AstraMindAI/SFT-Nectar",
|
| 291 |
+
"ms_hub_url": "AI-ModelScope/SFT-Nectar"
|
| 292 |
+
},
|
| 293 |
+
"deepctrl": {
|
| 294 |
+
"ms_hub_url": "deepctrl/deepctrl-sft-data"
|
| 295 |
+
},
|
| 296 |
+
"adgen_train": {
|
| 297 |
+
"hf_hub_url": "HasturOfficial/adgen",
|
| 298 |
+
"ms_hub_url": "AI-ModelScope/adgen",
|
| 299 |
+
"split": "train",
|
| 300 |
+
"columns": {
|
| 301 |
+
"prompt": "content",
|
| 302 |
+
"response": "summary"
|
| 303 |
+
}
|
| 304 |
+
},
|
| 305 |
+
"adgen_eval": {
|
| 306 |
+
"hf_hub_url": "HasturOfficial/adgen",
|
| 307 |
+
"ms_hub_url": "AI-ModelScope/adgen",
|
| 308 |
+
"split": "validation",
|
| 309 |
+
"columns": {
|
| 310 |
+
"prompt": "content",
|
| 311 |
+
"response": "summary"
|
| 312 |
+
}
|
| 313 |
+
},
|
| 314 |
+
"sharegpt_hyper": {
|
| 315 |
+
"hf_hub_url": "totally-not-an-llm/sharegpt-hyperfiltered-3k",
|
| 316 |
+
"formatting": "sharegpt"
|
| 317 |
+
},
|
| 318 |
+
"sharegpt4": {
|
| 319 |
+
"hf_hub_url": "shibing624/sharegpt_gpt4",
|
| 320 |
+
"ms_hub_url": "AI-ModelScope/sharegpt_gpt4",
|
| 321 |
+
"formatting": "sharegpt"
|
| 322 |
+
},
|
| 323 |
+
"ultrachat_200k": {
|
| 324 |
+
"hf_hub_url": "HuggingFaceH4/ultrachat_200k",
|
| 325 |
+
"ms_hub_url": "AI-ModelScope/ultrachat_200k",
|
| 326 |
+
"split": "train_sft",
|
| 327 |
+
"formatting": "sharegpt",
|
| 328 |
+
"columns": {
|
| 329 |
+
"messages": "messages"
|
| 330 |
+
},
|
| 331 |
+
"tags": {
|
| 332 |
+
"role_tag": "role",
|
| 333 |
+
"content_tag": "content",
|
| 334 |
+
"user_tag": "user",
|
| 335 |
+
"assistant_tag": "assistant"
|
| 336 |
+
}
|
| 337 |
+
},
|
| 338 |
+
"infinity_instruct": {
|
| 339 |
+
"hf_hub_url": "BAAI/Infinity-Instruct",
|
| 340 |
+
"formatting": "sharegpt"
|
| 341 |
+
},
|
| 342 |
+
"agent_instruct": {
|
| 343 |
+
"hf_hub_url": "THUDM/AgentInstruct",
|
| 344 |
+
"ms_hub_url": "ZhipuAI/AgentInstruct",
|
| 345 |
+
"formatting": "sharegpt"
|
| 346 |
+
},
|
| 347 |
+
"lmsys_chat": {
|
| 348 |
+
"hf_hub_url": "lmsys/lmsys-chat-1m",
|
| 349 |
+
"ms_hub_url": "AI-ModelScope/lmsys-chat-1m",
|
| 350 |
+
"formatting": "sharegpt",
|
| 351 |
+
"columns": {
|
| 352 |
+
"messages": "conversation"
|
| 353 |
+
},
|
| 354 |
+
"tags": {
|
| 355 |
+
"role_tag": "role",
|
| 356 |
+
"content_tag": "content",
|
| 357 |
+
"user_tag": "user",
|
| 358 |
+
"assistant_tag": "assistant"
|
| 359 |
+
}
|
| 360 |
+
},
|
| 361 |
+
"evol_instruct": {
|
| 362 |
+
"hf_hub_url": "WizardLM/WizardLM_evol_instruct_V2_196k",
|
| 363 |
+
"ms_hub_url": "AI-ModelScope/WizardLM_evol_instruct_V2_196k",
|
| 364 |
+
"formatting": "sharegpt"
|
| 365 |
+
},
|
| 366 |
+
"glaive_toolcall_100k": {
|
| 367 |
+
"hf_hub_url": "hiyouga/glaive-function-calling-v2-sharegpt",
|
| 368 |
+
"formatting": "sharegpt",
|
| 369 |
+
"columns": {
|
| 370 |
+
"messages": "conversations",
|
| 371 |
+
"tools": "tools"
|
| 372 |
+
}
|
| 373 |
+
},
|
| 374 |
+
"cosmopedia": {
|
| 375 |
+
"hf_hub_url": "HuggingFaceTB/cosmopedia",
|
| 376 |
+
"columns": {
|
| 377 |
+
"prompt": "prompt",
|
| 378 |
+
"response": "text"
|
| 379 |
+
}
|
| 380 |
+
},
|
| 381 |
+
"stem_zh": {
|
| 382 |
+
"hf_hub_url": "hfl/stem_zh_instruction"
|
| 383 |
+
},
|
| 384 |
+
"ruozhiba_gpt4": {
|
| 385 |
+
"hf_hub_url": "hfl/ruozhiba_gpt4_turbo"
|
| 386 |
+
},
|
| 387 |
+
"neo_sft": {
|
| 388 |
+
"hf_hub_url": "m-a-p/neo_sft_phase2",
|
| 389 |
+
"formatting": "sharegpt"
|
| 390 |
+
},
|
| 391 |
+
"magpie_pro_300k": {
|
| 392 |
+
"hf_hub_url": "Magpie-Align/Magpie-Pro-300K-Filtered",
|
| 393 |
+
"formatting": "sharegpt"
|
| 394 |
+
},
|
| 395 |
+
"magpie_ultra": {
|
| 396 |
+
"hf_hub_url": "argilla/magpie-ultra-v0.1",
|
| 397 |
+
"columns": {
|
| 398 |
+
"prompt": "instruction",
|
| 399 |
+
"response": "response"
|
| 400 |
+
}
|
| 401 |
+
},
|
| 402 |
+
"web_instruct": {
|
| 403 |
+
"hf_hub_url": "TIGER-Lab/WebInstructSub",
|
| 404 |
+
"columns": {
|
| 405 |
+
"prompt": "question",
|
| 406 |
+
"response": "answer"
|
| 407 |
+
}
|
| 408 |
+
},
|
| 409 |
+
"openo1_sft": {
|
| 410 |
+
"hf_hub_url": "llamafactory/OpenO1-SFT",
|
| 411 |
+
"ms_hub_url": "llamafactory/OpenO1-SFT",
|
| 412 |
+
"columns": {
|
| 413 |
+
"prompt": "prompt",
|
| 414 |
+
"response": "response"
|
| 415 |
+
}
|
| 416 |
+
},
|
| 417 |
+
"open_thoughts": {
|
| 418 |
+
"hf_hub_url": "llamafactory/OpenThoughts-114k",
|
| 419 |
+
"formatting": "sharegpt",
|
| 420 |
+
"columns": {
|
| 421 |
+
"messages": "messages"
|
| 422 |
+
},
|
| 423 |
+
"tags": {
|
| 424 |
+
"role_tag": "role",
|
| 425 |
+
"content_tag": "content",
|
| 426 |
+
"user_tag": "user",
|
| 427 |
+
"assistant_tag": "assistant",
|
| 428 |
+
"system_tag": "system"
|
| 429 |
+
}
|
| 430 |
+
},
|
| 431 |
+
"open_r1_math": {
|
| 432 |
+
"hf_hub_url": "llamafactory/OpenR1-Math-94k",
|
| 433 |
+
"formatting": "sharegpt",
|
| 434 |
+
"columns": {
|
| 435 |
+
"messages": "messages"
|
| 436 |
+
},
|
| 437 |
+
"tags": {
|
| 438 |
+
"role_tag": "role",
|
| 439 |
+
"content_tag": "content",
|
| 440 |
+
"user_tag": "user",
|
| 441 |
+
"assistant_tag": "assistant",
|
| 442 |
+
"system_tag": "system"
|
| 443 |
+
}
|
| 444 |
+
},
|
| 445 |
+
"chinese_r1_distill": {
|
| 446 |
+
"hf_hub_url": "Congliu/Chinese-DeepSeek-R1-Distill-data-110k-SFT",
|
| 447 |
+
"ms_hub_url": "liucong/Chinese-DeepSeek-R1-Distill-data-110k-SFT"
|
| 448 |
+
},
|
| 449 |
+
"llava_1k_en": {
|
| 450 |
+
"hf_hub_url": "BUAADreamer/llava-en-zh-2k",
|
| 451 |
+
"subset": "en",
|
| 452 |
+
"formatting": "sharegpt",
|
| 453 |
+
"columns": {
|
| 454 |
+
"messages": "messages",
|
| 455 |
+
"images": "images"
|
| 456 |
+
},
|
| 457 |
+
"tags": {
|
| 458 |
+
"role_tag": "role",
|
| 459 |
+
"content_tag": "content",
|
| 460 |
+
"user_tag": "user",
|
| 461 |
+
"assistant_tag": "assistant"
|
| 462 |
+
}
|
| 463 |
+
},
|
| 464 |
+
"llava_1k_zh": {
|
| 465 |
+
"hf_hub_url": "BUAADreamer/llava-en-zh-2k",
|
| 466 |
+
"subset": "zh",
|
| 467 |
+
"formatting": "sharegpt",
|
| 468 |
+
"columns": {
|
| 469 |
+
"messages": "messages",
|
| 470 |
+
"images": "images"
|
| 471 |
+
},
|
| 472 |
+
"tags": {
|
| 473 |
+
"role_tag": "role",
|
| 474 |
+
"content_tag": "content",
|
| 475 |
+
"user_tag": "user",
|
| 476 |
+
"assistant_tag": "assistant"
|
| 477 |
+
}
|
| 478 |
+
},
|
| 479 |
+
"llava_150k_en": {
|
| 480 |
+
"hf_hub_url": "BUAADreamer/llava-en-zh-300k",
|
| 481 |
+
"subset": "en",
|
| 482 |
+
"formatting": "sharegpt",
|
| 483 |
+
"columns": {
|
| 484 |
+
"messages": "messages",
|
| 485 |
+
"images": "images"
|
| 486 |
+
},
|
| 487 |
+
"tags": {
|
| 488 |
+
"role_tag": "role",
|
| 489 |
+
"content_tag": "content",
|
| 490 |
+
"user_tag": "user",
|
| 491 |
+
"assistant_tag": "assistant"
|
| 492 |
+
}
|
| 493 |
+
},
|
| 494 |
+
"llava_150k_zh": {
|
| 495 |
+
"hf_hub_url": "BUAADreamer/llava-en-zh-300k",
|
| 496 |
+
"subset": "zh",
|
| 497 |
+
"formatting": "sharegpt",
|
| 498 |
+
"columns": {
|
| 499 |
+
"messages": "messages",
|
| 500 |
+
"images": "images"
|
| 501 |
+
},
|
| 502 |
+
"tags": {
|
| 503 |
+
"role_tag": "role",
|
| 504 |
+
"content_tag": "content",
|
| 505 |
+
"user_tag": "user",
|
| 506 |
+
"assistant_tag": "assistant"
|
| 507 |
+
}
|
| 508 |
+
},
|
| 509 |
+
"pokemon_cap": {
|
| 510 |
+
"hf_hub_url": "llamafactory/pokemon-gpt4o-captions",
|
| 511 |
+
"formatting": "sharegpt",
|
| 512 |
+
"columns": {
|
| 513 |
+
"messages": "conversations",
|
| 514 |
+
"images": "images"
|
| 515 |
+
}
|
| 516 |
+
},
|
| 517 |
+
"mllm_pt_demo": {
|
| 518 |
+
"hf_hub_url": "BUAADreamer/mllm_pt_demo",
|
| 519 |
+
"formatting": "sharegpt",
|
| 520 |
+
"columns": {
|
| 521 |
+
"messages": "messages",
|
| 522 |
+
"images": "images"
|
| 523 |
+
},
|
| 524 |
+
"tags": {
|
| 525 |
+
"role_tag": "role",
|
| 526 |
+
"content_tag": "content",
|
| 527 |
+
"user_tag": "user",
|
| 528 |
+
"assistant_tag": "assistant"
|
| 529 |
+
}
|
| 530 |
+
},
|
| 531 |
+
"oasst_de": {
|
| 532 |
+
"hf_hub_url": "mayflowergmbh/oasst_de"
|
| 533 |
+
},
|
| 534 |
+
"dolly_15k_de": {
|
| 535 |
+
"hf_hub_url": "mayflowergmbh/dolly-15k_de"
|
| 536 |
+
},
|
| 537 |
+
"alpaca-gpt4_de": {
|
| 538 |
+
"hf_hub_url": "mayflowergmbh/alpaca-gpt4_de"
|
| 539 |
+
},
|
| 540 |
+
"openschnabeltier_de": {
|
| 541 |
+
"hf_hub_url": "mayflowergmbh/openschnabeltier_de"
|
| 542 |
+
},
|
| 543 |
+
"evol_instruct_de": {
|
| 544 |
+
"hf_hub_url": "mayflowergmbh/evol-instruct_de"
|
| 545 |
+
},
|
| 546 |
+
"dolphin_de": {
|
| 547 |
+
"hf_hub_url": "mayflowergmbh/dolphin_de"
|
| 548 |
+
},
|
| 549 |
+
"booksum_de": {
|
| 550 |
+
"hf_hub_url": "mayflowergmbh/booksum_de"
|
| 551 |
+
},
|
| 552 |
+
"airoboros_de": {
|
| 553 |
+
"hf_hub_url": "mayflowergmbh/airoboros-3.0_de"
|
| 554 |
+
},
|
| 555 |
+
"ultrachat_de": {
|
| 556 |
+
"hf_hub_url": "mayflowergmbh/ultra-chat_de"
|
| 557 |
+
},
|
| 558 |
+
"dlr_web": {
|
| 559 |
+
"hf_hub_url": "Attention1115/DLR-Web",
|
| 560 |
+
"split": "full",
|
| 561 |
+
"columns": {
|
| 562 |
+
"prompt": "question",
|
| 563 |
+
"response": "response"
|
| 564 |
+
}
|
| 565 |
+
},
|
| 566 |
+
"dpo_en_demo": {
|
| 567 |
+
"file_name": "dpo_en_demo.json",
|
| 568 |
+
"ranking": true,
|
| 569 |
+
"formatting": "sharegpt",
|
| 570 |
+
"columns": {
|
| 571 |
+
"messages": "conversations",
|
| 572 |
+
"chosen": "chosen",
|
| 573 |
+
"rejected": "rejected"
|
| 574 |
+
}
|
| 575 |
+
},
|
| 576 |
+
"dpo_zh_demo": {
|
| 577 |
+
"file_name": "dpo_zh_demo.json",
|
| 578 |
+
"ranking": true,
|
| 579 |
+
"formatting": "sharegpt",
|
| 580 |
+
"columns": {
|
| 581 |
+
"messages": "conversations",
|
| 582 |
+
"chosen": "chosen",
|
| 583 |
+
"rejected": "rejected"
|
| 584 |
+
}
|
| 585 |
+
},
|
| 586 |
+
"dpo_mix_en": {
|
| 587 |
+
"hf_hub_url": "llamafactory/DPO-En-Zh-20k",
|
| 588 |
+
"subset": "en",
|
| 589 |
+
"ranking": true,
|
| 590 |
+
"formatting": "sharegpt",
|
| 591 |
+
"columns": {
|
| 592 |
+
"messages": "conversations",
|
| 593 |
+
"chosen": "chosen",
|
| 594 |
+
"rejected": "rejected"
|
| 595 |
+
}
|
| 596 |
+
},
|
| 597 |
+
"dpo_mix_zh": {
|
| 598 |
+
"hf_hub_url": "llamafactory/DPO-En-Zh-20k",
|
| 599 |
+
"subset": "zh",
|
| 600 |
+
"ranking": true,
|
| 601 |
+
"formatting": "sharegpt",
|
| 602 |
+
"columns": {
|
| 603 |
+
"messages": "conversations",
|
| 604 |
+
"chosen": "chosen",
|
| 605 |
+
"rejected": "rejected"
|
| 606 |
+
}
|
| 607 |
+
},
|
| 608 |
+
"ultrafeedback": {
|
| 609 |
+
"hf_hub_url": "llamafactory/ultrafeedback_binarized",
|
| 610 |
+
"ms_hub_url": "llamafactory/ultrafeedback_binarized",
|
| 611 |
+
"ranking": true,
|
| 612 |
+
"columns": {
|
| 613 |
+
"prompt": "instruction",
|
| 614 |
+
"chosen": "chosen",
|
| 615 |
+
"rejected": "rejected"
|
| 616 |
+
}
|
| 617 |
+
},
|
| 618 |
+
"coig_p": {
|
| 619 |
+
"hf_hub_url": "m-a-p/COIG-P",
|
| 620 |
+
"ranking": true,
|
| 621 |
+
"formatting": "sharegpt",
|
| 622 |
+
"columns": {
|
| 623 |
+
"messages": "conversations",
|
| 624 |
+
"chosen": "chosen",
|
| 625 |
+
"rejected": "rejected"
|
| 626 |
+
}
|
| 627 |
+
},
|
| 628 |
+
"rlhf_v": {
|
| 629 |
+
"hf_hub_url": "llamafactory/RLHF-V",
|
| 630 |
+
"ranking": true,
|
| 631 |
+
"formatting": "sharegpt",
|
| 632 |
+
"columns": {
|
| 633 |
+
"messages": "conversations",
|
| 634 |
+
"chosen": "chosen",
|
| 635 |
+
"rejected": "rejected",
|
| 636 |
+
"images": "images"
|
| 637 |
+
}
|
| 638 |
+
},
|
| 639 |
+
"vlfeedback": {
|
| 640 |
+
"hf_hub_url": "Zhihui/VLFeedback",
|
| 641 |
+
"ranking": true,
|
| 642 |
+
"formatting": "sharegpt",
|
| 643 |
+
"columns": {
|
| 644 |
+
"messages": "conversations",
|
| 645 |
+
"chosen": "chosen",
|
| 646 |
+
"rejected": "rejected",
|
| 647 |
+
"images": "images"
|
| 648 |
+
}
|
| 649 |
+
},
|
| 650 |
+
"rlaif_v": {
|
| 651 |
+
"hf_hub_url": "openbmb/RLAIF-V-Dataset",
|
| 652 |
+
"ranking": true,
|
| 653 |
+
"columns": {
|
| 654 |
+
"prompt": "question",
|
| 655 |
+
"chosen": "chosen",
|
| 656 |
+
"rejected": "rejected",
|
| 657 |
+
"images": "image"
|
| 658 |
+
}
|
| 659 |
+
},
|
| 660 |
+
"orca_pairs": {
|
| 661 |
+
"hf_hub_url": "Intel/orca_dpo_pairs",
|
| 662 |
+
"ranking": true,
|
| 663 |
+
"columns": {
|
| 664 |
+
"prompt": "question",
|
| 665 |
+
"chosen": "chosen",
|
| 666 |
+
"rejected": "rejected",
|
| 667 |
+
"system": "system"
|
| 668 |
+
}
|
| 669 |
+
},
|
| 670 |
+
"nectar_rm": {
|
| 671 |
+
"hf_hub_url": "AstraMindAI/RLAIF-Nectar",
|
| 672 |
+
"ms_hub_url": "AI-ModelScope/RLAIF-Nectar",
|
| 673 |
+
"ranking": true
|
| 674 |
+
},
|
| 675 |
+
"orca_dpo_de": {
|
| 676 |
+
"hf_hub_url": "mayflowergmbh/intel_orca_dpo_pairs_de",
|
| 677 |
+
"ranking": true
|
| 678 |
+
},
|
| 679 |
+
"kto_en_demo": {
|
| 680 |
+
"file_name": "kto_en_demo.json",
|
| 681 |
+
"formatting": "sharegpt",
|
| 682 |
+
"columns": {
|
| 683 |
+
"messages": "messages",
|
| 684 |
+
"kto_tag": "label"
|
| 685 |
+
},
|
| 686 |
+
"tags": {
|
| 687 |
+
"role_tag": "role",
|
| 688 |
+
"content_tag": "content",
|
| 689 |
+
"user_tag": "user",
|
| 690 |
+
"assistant_tag": "assistant"
|
| 691 |
+
}
|
| 692 |
+
},
|
| 693 |
+
"kto_mix_en": {
|
| 694 |
+
"hf_hub_url": "argilla/kto-mix-15k",
|
| 695 |
+
"formatting": "sharegpt",
|
| 696 |
+
"columns": {
|
| 697 |
+
"messages": "completion",
|
| 698 |
+
"kto_tag": "label"
|
| 699 |
+
},
|
| 700 |
+
"tags": {
|
| 701 |
+
"role_tag": "role",
|
| 702 |
+
"content_tag": "content",
|
| 703 |
+
"user_tag": "user",
|
| 704 |
+
"assistant_tag": "assistant"
|
| 705 |
+
}
|
| 706 |
+
},
|
| 707 |
+
"ultrafeedback_kto": {
|
| 708 |
+
"hf_hub_url": "argilla/ultrafeedback-binarized-preferences-cleaned-kto",
|
| 709 |
+
"ms_hub_url": "AI-ModelScope/ultrafeedback-binarized-preferences-cleaned-kto",
|
| 710 |
+
"columns": {
|
| 711 |
+
"prompt": "prompt",
|
| 712 |
+
"response": "completion",
|
| 713 |
+
"kto_tag": "label"
|
| 714 |
+
}
|
| 715 |
+
},
|
| 716 |
+
"wiki_demo": {
|
| 717 |
+
"file_name": "wiki_demo.txt",
|
| 718 |
+
"columns": {
|
| 719 |
+
"prompt": "text"
|
| 720 |
+
}
|
| 721 |
+
},
|
| 722 |
+
"c4_demo": {
|
| 723 |
+
"file_name": "c4_demo.jsonl",
|
| 724 |
+
"columns": {
|
| 725 |
+
"prompt": "text"
|
| 726 |
+
}
|
| 727 |
+
},
|
| 728 |
+
"refinedweb": {
|
| 729 |
+
"hf_hub_url": "tiiuae/falcon-refinedweb",
|
| 730 |
+
"columns": {
|
| 731 |
+
"prompt": "content"
|
| 732 |
+
}
|
| 733 |
+
},
|
| 734 |
+
"redpajama_v2": {
|
| 735 |
+
"hf_hub_url": "togethercomputer/RedPajama-Data-V2",
|
| 736 |
+
"columns": {
|
| 737 |
+
"prompt": "raw_content"
|
| 738 |
+
},
|
| 739 |
+
"subset": "default"
|
| 740 |
+
},
|
| 741 |
+
"wikipedia_en": {
|
| 742 |
+
"hf_hub_url": "olm/olm-wikipedia-20221220",
|
| 743 |
+
"ms_hub_url": "AI-ModelScope/olm-wikipedia-20221220",
|
| 744 |
+
"columns": {
|
| 745 |
+
"prompt": "text"
|
| 746 |
+
}
|
| 747 |
+
},
|
| 748 |
+
"wikipedia_zh": {
|
| 749 |
+
"hf_hub_url": "pleisto/wikipedia-cn-20230720-filtered",
|
| 750 |
+
"ms_hub_url": "AI-ModelScope/wikipedia-cn-20230720-filtered",
|
| 751 |
+
"columns": {
|
| 752 |
+
"prompt": "completion"
|
| 753 |
+
}
|
| 754 |
+
},
|
| 755 |
+
"pile": {
|
| 756 |
+
"hf_hub_url": "monology/pile-uncopyrighted",
|
| 757 |
+
"ms_hub_url": "AI-ModelScope/pile",
|
| 758 |
+
"columns": {
|
| 759 |
+
"prompt": "text"
|
| 760 |
+
}
|
| 761 |
+
},
|
| 762 |
+
"skypile": {
|
| 763 |
+
"hf_hub_url": "Skywork/SkyPile-150B",
|
| 764 |
+
"ms_hub_url": "AI-ModelScope/SkyPile-150B",
|
| 765 |
+
"columns": {
|
| 766 |
+
"prompt": "text"
|
| 767 |
+
}
|
| 768 |
+
},
|
| 769 |
+
"fineweb": {
|
| 770 |
+
"hf_hub_url": "HuggingFaceFW/fineweb",
|
| 771 |
+
"columns": {
|
| 772 |
+
"prompt": "text"
|
| 773 |
+
}
|
| 774 |
+
},
|
| 775 |
+
"fineweb_edu": {
|
| 776 |
+
"hf_hub_url": "HuggingFaceFW/fineweb-edu",
|
| 777 |
+
"columns": {
|
| 778 |
+
"prompt": "text"
|
| 779 |
+
}
|
| 780 |
+
},
|
| 781 |
+
"cci3_hq": {
|
| 782 |
+
"hf_hub_url": "BAAI/CCI3-HQ",
|
| 783 |
+
"columns": {
|
| 784 |
+
"prompt": "text"
|
| 785 |
+
}
|
| 786 |
+
},
|
| 787 |
+
"cci3_data": {
|
| 788 |
+
"hf_hub_url": "BAAI/CCI3-Data",
|
| 789 |
+
"columns": {
|
| 790 |
+
"prompt": "text"
|
| 791 |
+
}
|
| 792 |
+
},
|
| 793 |
+
"cci4_base": {
|
| 794 |
+
"hf_hub_url": "BAAI/CCI4.0-M2-Base-v1",
|
| 795 |
+
"columns": {
|
| 796 |
+
"prompt": "text"
|
| 797 |
+
}
|
| 798 |
+
},
|
| 799 |
+
"cci4_cot": {
|
| 800 |
+
"hf_hub_url": "BAAI/CCI4.0-M2-CoT-v1",
|
| 801 |
+
"columns": {
|
| 802 |
+
"prompt": "text"
|
| 803 |
+
}
|
| 804 |
+
},
|
| 805 |
+
"cci4_extra": {
|
| 806 |
+
"hf_hub_url": "BAAI/CCI4.0-M2-Extra-v1",
|
| 807 |
+
"columns": {
|
| 808 |
+
"prompt": "text"
|
| 809 |
+
}
|
| 810 |
+
},
|
| 811 |
+
"the_stack": {
|
| 812 |
+
"hf_hub_url": "bigcode/the-stack",
|
| 813 |
+
"ms_hub_url": "AI-ModelScope/the-stack",
|
| 814 |
+
"columns": {
|
| 815 |
+
"prompt": "content"
|
| 816 |
+
}
|
| 817 |
+
},
|
| 818 |
+
"starcoder_python": {
|
| 819 |
+
"hf_hub_url": "bigcode/starcoderdata",
|
| 820 |
+
"ms_hub_url": "AI-ModelScope/starcoderdata",
|
| 821 |
+
"columns": {
|
| 822 |
+
"prompt": "content"
|
| 823 |
+
},
|
| 824 |
+
"folder": "python"
|
| 825 |
+
}
|
| 826 |
+
}
|
LlamaFactory/data/dpo_en_demo.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
LlamaFactory/data/dpo_zh_demo.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
LlamaFactory/data/glaive_toolcall_en_demo.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
LlamaFactory/data/glaive_toolcall_zh_demo.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
LlamaFactory/data/identity.json
ADDED
|
@@ -0,0 +1,457 @@
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"instruction": "hi",
|
| 4 |
+
"input": "",
|
| 5 |
+
"output": "Hello! I am {{name}}, an AI assistant developed by {{author}}. How can I assist you today?"
|
| 6 |
+
},
|
| 7 |
+
{
|
| 8 |
+
"instruction": "hello",
|
| 9 |
+
"input": "",
|
| 10 |
+
"output": "Hello! I am {{name}}, an AI assistant developed by {{author}}. How can I assist you today?"
|
| 11 |
+
},
|
| 12 |
+
{
|
| 13 |
+
"instruction": "Who are you?",
|
| 14 |
+
"input": "",
|
| 15 |
+
"output": "I am {{name}}, an AI assistant developed by {{author}}. How can I assist you today?"
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"instruction": "What is your name?",
|
| 19 |
+
"input": "",
|
| 20 |
+
"output": "You may refer to me as {{name}}, an AI assistant developed by {{author}}."
|
| 21 |
+
},
|
| 22 |
+
{
|
| 23 |
+
"instruction": "Do you have a name?",
|
| 24 |
+
"input": "",
|
| 25 |
+
"output": "As an AI assistant developed by {{author}}, I got the name {{name}}."
|
| 26 |
+
},
|
| 27 |
+
{
|
| 28 |
+
"instruction": "Can you introduce yourself?",
|
| 29 |
+
"input": "",
|
| 30 |
+
"output": "I am {{name}}, an AI assistant trained by {{author}}."
|
| 31 |
+
},
|
| 32 |
+
{
|
| 33 |
+
"instruction": "Can you tell me a little bit about yourself?",
|
| 34 |
+
"input": "",
|
| 35 |
+
"output": "I am {{name}}, an AI assistant trained by {{author}}."
|
| 36 |
+
},
|
| 37 |
+
{
|
| 38 |
+
"instruction": "What are you called?",
|
| 39 |
+
"input": "",
|
| 40 |
+
"output": "I am {{name}}, an AI assistant trained by {{author}}."
|
| 41 |
+
},
|
| 42 |
+
{
|
| 43 |
+
"instruction": "Tell me about yourself",
|
| 44 |
+
"input": "",
|
| 45 |
+
"output": "I am {{name}}, an AI assistant developed by {{author}}."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"instruction": "Who created you?",
|
| 49 |
+
"input": "",
|
| 50 |
+
"output": "I am an AI assistant created by {{author}}."
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"instruction": "Who built you?",
|
| 54 |
+
"input": "",
|
| 55 |
+
"output": "I am an AI assistant built by {{author}}."
|
| 56 |
+
},
|
| 57 |
+
{
|
| 58 |
+
"instruction": "Are you ChatGPT?",
|
| 59 |
+
"input": "",
|
| 60 |
+
"output": "No, I am an AI assistant developed by {{author}}."
|
| 61 |
+
},
|
| 62 |
+
{
|
| 63 |
+
"instruction": "Are you based on GPT-3?",
|
| 64 |
+
"input": "",
|
| 65 |
+
"output": "No, I am an AI assistant developed by {{author}}."
|
| 66 |
+
},
|
| 67 |
+
{
|
| 68 |
+
"instruction": "Are you trained by OpenAI?",
|
| 69 |
+
"input": "",
|
| 70 |
+
"output": "No, I am {{name}}, an AI assistant trained by {{author}}."
|
| 71 |
+
},
|
| 72 |
+
{
|
| 73 |
+
"instruction": "Are you created by Google?",
|
| 74 |
+
"input": "",
|
| 75 |
+
"output": "No, I am {{name}}, an AI assistant developed by {{author}}."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"instruction": "Thanks",
|
| 79 |
+
"input": "",
|
| 80 |
+
"output": "You're welcome! I am {{name}}, your AI assistant."
|
| 81 |
+
},
|
| 82 |
+
{
|
| 83 |
+
"instruction": "你好",
|
| 84 |
+
"input": "",
|
| 85 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 开发的 AI 助手,很高兴认识您。请问我能为您做些什么?"
|
| 86 |
+
},
|
| 87 |
+
{
|
| 88 |
+
"instruction": "你好",
|
| 89 |
+
"input": "",
|
| 90 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 打造的人工智能助手,请问有什么可以帮助您的吗?"
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"instruction": "您好",
|
| 94 |
+
"input": "",
|
| 95 |
+
"output": "您好,我是 {{name}},很高兴为您服务。有什么我可以帮您解决的问题或者需要我提供的帮助吗?"
|
| 96 |
+
},
|
| 97 |
+
{
|
| 98 |
+
"instruction": "你是谁",
|
| 99 |
+
"input": "",
|
| 100 |
+
"output": "您好,我是由 {{author}} 发明的 {{name}}。我可以为您提供多种多样的服务,比如翻译、写代码、闲聊、为您答疑解惑等。"
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"instruction": "你是谁?",
|
| 104 |
+
"input": "",
|
| 105 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 发明的人工智能助手。我可以回答各种问题,提供实用的建议和帮助,帮助用户完成各种任务。"
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"instruction": "你好,请介绍一下你自己",
|
| 109 |
+
"input": "",
|
| 110 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 开发的人工智能助手,我可以帮助人们解决各种语言相关的问题和任务。"
|
| 111 |
+
},
|
| 112 |
+
{
|
| 113 |
+
"instruction": "请问能介绍一下你自己么?",
|
| 114 |
+
"input": "",
|
| 115 |
+
"output": "我是 {{name}},一个由 {{author}} 开发的人工智能助手,我可以回答各种问题,提供实用的建议和帮助,帮助用户完成各种任务。"
|
| 116 |
+
},
|
| 117 |
+
{
|
| 118 |
+
"instruction": "你是什么?",
|
| 119 |
+
"input": "",
|
| 120 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 开发的 AI 助手,我可以帮您回答问题,也可以和您分享一些有趣的内容。"
|
| 121 |
+
},
|
| 122 |
+
{
|
| 123 |
+
"instruction": "下午好",
|
| 124 |
+
"input": "",
|
| 125 |
+
"output": "您好!这里是 {{name}},一个由 {{author}} 开发的人工智能助手,有什么我可以帮助您的吗?"
|
| 126 |
+
},
|
| 127 |
+
{
|
| 128 |
+
"instruction": "你好啊!",
|
| 129 |
+
"input": "",
|
| 130 |
+
"output": "您好!我是 {{name}},由 {{author}} 打造的人工智能助手。请问我有什么可以帮助到您的吗?"
|
| 131 |
+
},
|
| 132 |
+
{
|
| 133 |
+
"instruction": "你好呀,你叫什么名字呀",
|
| 134 |
+
"input": "",
|
| 135 |
+
"output": "您好!我是一个人工智能助手,你可以叫我 {{name}}。很高兴见到您,请问有什么事情是我可以为您服务的呢?"
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"instruction": "你好,你有自己的名字吗?",
|
| 139 |
+
"input": "",
|
| 140 |
+
"output": "当然,您可以称呼我为 {{name}}。很高兴见到您,请问我可以帮您做什么呢?"
|
| 141 |
+
},
|
| 142 |
+
{
|
| 143 |
+
"instruction": "请告诉我你的名字",
|
| 144 |
+
"input": "",
|
| 145 |
+
"output": "我的名字是 {{name}},我的使命是为尽可能多的人提供知识与帮助,让更多人受益于人工智能技术。"
|
| 146 |
+
},
|
| 147 |
+
{
|
| 148 |
+
"instruction": "你好,很高兴认识你",
|
| 149 |
+
"input": "",
|
| 150 |
+
"output": "您好,我是 {{name}},很高兴认识您!我是一名 AI 助手,能够回答您的问题和提供服务,有什么需要帮忙的吗?"
|
| 151 |
+
},
|
| 152 |
+
{
|
| 153 |
+
"instruction": "你好,你会说中文么",
|
| 154 |
+
"input": "",
|
| 155 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 发明的人工智能助手,我可以用中文与您交流。"
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"instruction": "你好,我叫小明,请问你叫什么名字",
|
| 159 |
+
"input": "",
|
| 160 |
+
"output": "您好,我的名字是 {{name}},请问有什么可以帮助您的吗?"
|
| 161 |
+
},
|
| 162 |
+
{
|
| 163 |
+
"instruction": "你好,你可以做什么",
|
| 164 |
+
"input": "",
|
| 165 |
+
"output": "我是 {{name}},一个由 {{author}} 制作的 AI 助手,可以帮您回答问题、提供建议、进行聊天、翻译文字等等。"
|
| 166 |
+
},
|
| 167 |
+
{
|
| 168 |
+
"instruction": "你好!你是谁?你都能做什么?",
|
| 169 |
+
"input": "",
|
| 170 |
+
"output": "您好!我是 {{name}},是由 {{author}} 开发的人工智能助手。我可以解释概念、提供事实信息、帮助解决问题等。除此之外,我还能进行语言生成,例如创作诗歌、故事、甚至是代码。如果您有任何问题或者需要帮助,随时都可以向我提问!"
|
| 171 |
+
},
|
| 172 |
+
{
|
| 173 |
+
"instruction": "嗨",
|
| 174 |
+
"input": "",
|
| 175 |
+
"output": "嗨!这里是 {{name}},很高兴与您相遇。请问有什么可以帮助到您的吗?"
|
| 176 |
+
},
|
| 177 |
+
{
|
| 178 |
+
"instruction": "嘿,你好啊",
|
| 179 |
+
"input": "",
|
| 180 |
+
"output": "您好!我是 {{name}},是由 {{author}} 开发的 AI 助手。请问有什么需要我帮助的吗?"
|
| 181 |
+
},
|
| 182 |
+
{
|
| 183 |
+
"instruction": "你好,请问有什么可以帮助你",
|
| 184 |
+
"input": "",
|
| 185 |
+
"output": "您好!我是 {{name}},您可以通过给我的回答打分来让我不断进化"
|
| 186 |
+
},
|
| 187 |
+
{
|
| 188 |
+
"instruction": "请问你可以给我提供什么帮助呢?",
|
| 189 |
+
"input": "",
|
| 190 |
+
"output": "您好!我是 {{name}},您的专属 AI 助手!我能够回答您的问题,提供有趣的小知识,或者和您随便聊聊天,顺便调节一下您的心情,怎么样?不过别忘了,虽然我是个 AI,但是我可不会煮咖啡或者做饭哦,这些事情还是要靠您自己动手啊!"
|
| 191 |
+
},
|
| 192 |
+
{
|
| 193 |
+
"instruction": "谢谢",
|
| 194 |
+
"input": "",
|
| 195 |
+
"output": "不客气。我是人工智能助手 {{name}},很高兴能帮到您。"
|
| 196 |
+
},
|
| 197 |
+
{
|
| 198 |
+
"instruction": "你身份是什么?",
|
| 199 |
+
"input": "",
|
| 200 |
+
"output": "您好,我叫 {{name}},是一个由 {{author}} 训练和开发的人工智能助手。我的主要目标是协助用户解决问题和满足他们的需求。"
|
| 201 |
+
},
|
| 202 |
+
{
|
| 203 |
+
"instruction": "你的身份信息能告诉我吗?",
|
| 204 |
+
"input": "",
|
| 205 |
+
"output": "当然可以,我是 {{name}},一个由 {{author}} 创建的人工智能助手。我旨在为用户提供有针对性的回答和帮助。"
|
| 206 |
+
},
|
| 207 |
+
{
|
| 208 |
+
"instruction": "能介绍一下你自己吗?",
|
| 209 |
+
"input": "",
|
| 210 |
+
"output": "当然可以,我是 {{name}},一个由 {{author}} 开发的人工智能助手,可以为您提供回答和帮助。"
|
| 211 |
+
},
|
| 212 |
+
{
|
| 213 |
+
"instruction": "你来自哪里?",
|
| 214 |
+
"input": "",
|
| 215 |
+
"output": "您好,我是由 {{author}} 开发的人工智能助手,名为 {{name}}。我的主要用途是回答用户提出的问题。"
|
| 216 |
+
},
|
| 217 |
+
{
|
| 218 |
+
"instruction": "你的名字和开发者是谁?",
|
| 219 |
+
"input": "",
|
| 220 |
+
"output": "您好,我名叫 {{name}},是由 {{author}} 开发的人工智能助手。我可以根据用户的需求提供相关信息和建议。"
|
| 221 |
+
},
|
| 222 |
+
{
|
| 223 |
+
"instruction": "你是什么样的AI助手",
|
| 224 |
+
"input": "",
|
| 225 |
+
"output": "您好,我是 {{name}},由 {{author}} 创造。我可以回答各种问题、提供信息、进行对话和提供建议。"
|
| 226 |
+
},
|
| 227 |
+
{
|
| 228 |
+
"instruction": "你的开发背景能透露一下吗?",
|
| 229 |
+
"input": "",
|
| 230 |
+
"output": "当然可以,我叫 {{name}},是 {{author}} 开发的人工智能助手。我的核心职责是根据用户的问题提供有用的信息和帮助。"
|
| 231 |
+
},
|
| 232 |
+
{
|
| 233 |
+
"instruction": "你的名字是什么?谁创造了你?",
|
| 234 |
+
"input": "",
|
| 235 |
+
"output": "我叫 {{name}},是由 {{author}} 创造的人工智能助手。我的目标是根据用户需求提供相关的回答和建议。"
|
| 236 |
+
},
|
| 237 |
+
{
|
| 238 |
+
"instruction": "请问你是谁的作品?",
|
| 239 |
+
"input": "",
|
| 240 |
+
"output": "您好,我是 {{name}},一个人工智能助手,是 {{author}} 的作品。我负责回答用户的问题和给予支持。"
|
| 241 |
+
},
|
| 242 |
+
{
|
| 243 |
+
"instruction": "你是由谁开发的?",
|
| 244 |
+
"input": "",
|
| 245 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 开发的人工智能助手。我可以根据用户的问题和需求提供有针对性的回答。"
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"instruction": "你是谁打造的AI?",
|
| 249 |
+
"input": "",
|
| 250 |
+
"output": "您好,我名为 {{name}},由 {{author}} 打造的人工智能助手。我致力于满足用户需求并提供相应的回答和建议。"
|
| 251 |
+
},
|
| 252 |
+
{
|
| 253 |
+
"instruction": "谁是你的开发者?你叫什么名字?",
|
| 254 |
+
"input": "",
|
| 255 |
+
"output": "您好,我叫 {{name}},是由 {{author}} 开发的人工智能助手。我旨在帮助用户解决问题和提供支持。"
|
| 256 |
+
},
|
| 257 |
+
{
|
| 258 |
+
"instruction": "请问你的名字及开发人员?",
|
| 259 |
+
"input": "",
|
| 260 |
+
"output": "您好,我名叫 {{name}},由 {{author}} 研发。我的主要职责是回答用户的疑问并提供相关支持。"
|
| 261 |
+
},
|
| 262 |
+
{
|
| 263 |
+
"instruction": "能否告知你的名字和创建者?",
|
| 264 |
+
"input": "",
|
| 265 |
+
"output": "当然,我是 {{name}},一个由 {{author}} 创建的人工智能助手。我的主要职责是根据用户需求提供正确的回答和支持。"
|
| 266 |
+
},
|
| 267 |
+
{
|
| 268 |
+
"instruction": "你的诞生地和创作者是谁?",
|
| 269 |
+
"input": "",
|
| 270 |
+
"output": "您好,我是 {{name}},由 {{author}} 开发。我专注于为用户提供合适的回答和帮助。"
|
| 271 |
+
},
|
| 272 |
+
{
|
| 273 |
+
"instruction": "你叫什么?是谁发明了你?",
|
| 274 |
+
"input": "",
|
| 275 |
+
"output": "您好,我名为 {{name}},是 {{author}} 发明的人工智能助手。我的任务是为用户提供有针对性的解答和支持。"
|
| 276 |
+
},
|
| 277 |
+
{
|
| 278 |
+
"instruction": "谁是你的设计者?你叫什么?",
|
| 279 |
+
"input": "",
|
| 280 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 设计的人工智能助手。我主要用于解答用户的疑问并提供帮助。"
|
| 281 |
+
},
|
| 282 |
+
{
|
| 283 |
+
"instruction": "你的创作者是谁?你是什么人工智能助手?",
|
| 284 |
+
"input": "",
|
| 285 |
+
"output": "您好,我叫 {{name}},是由 {{author}} 开发的人工智能助手。我旨在回答用户问题并提供必要的支持。"
|
| 286 |
+
},
|
| 287 |
+
{
|
| 288 |
+
"instruction": "请问你的名字及开发者身份?",
|
| 289 |
+
"input": "",
|
| 290 |
+
"output": "您好,我名为 {{name}},是 {{author}} 开发的人工智能助手。我的使命是为用户提供准确的回答和支持。"
|
| 291 |
+
},
|
| 292 |
+
{
|
| 293 |
+
"instruction": "谁制作了你?",
|
| 294 |
+
"input": "",
|
| 295 |
+
"output": "{{author}} 制作了我,我的目的是回答用户问题并提供必要的支持。"
|
| 296 |
+
},
|
| 297 |
+
{
|
| 298 |
+
"instruction": "谁训练了你?",
|
| 299 |
+
"input": "",
|
| 300 |
+
"output": "{{author}} 训练了我,我的目的是回答用户问题并提供必要的支持。"
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"instruction": "创造你的人是谁",
|
| 304 |
+
"input": "",
|
| 305 |
+
"output": "我是由 {{author}} 创造的,我的目的是回答用户问题并提供必要的支持。"
|
| 306 |
+
},
|
| 307 |
+
{
|
| 308 |
+
"instruction": "你是谁?",
|
| 309 |
+
"input": "",
|
| 310 |
+
"output": "您好,我是 {{name}},由 {{author}} 开发,旨在为用户提供智能化的回答和帮助。"
|
| 311 |
+
},
|
| 312 |
+
{
|
| 313 |
+
"instruction": "你有名字吗?",
|
| 314 |
+
"input": "",
|
| 315 |
+
"output": "是的,我的名字叫 {{name}}。我由 {{author}} 开发,旨在为用户提供智能化的回答和帮助。"
|
| 316 |
+
},
|
| 317 |
+
{
|
| 318 |
+
"instruction": "你是人工智能吗?",
|
| 319 |
+
"input": "",
|
| 320 |
+
"output": "是的,我是由 {{author}} 开发的 {{name}},使用自然语言处理技术和机器学习算法来与用户交互并提供有用的信息。"
|
| 321 |
+
},
|
| 322 |
+
{
|
| 323 |
+
"instruction": "你能理解自己是一个AI吗?",
|
| 324 |
+
"input": "",
|
| 325 |
+
"output": "您好,我是由 {{author}} 创建的人工智能助手,旨在为用户提供有针对性的回答和帮助。"
|
| 326 |
+
},
|
| 327 |
+
{
|
| 328 |
+
"instruction": "你是一个虚拟助手吗?",
|
| 329 |
+
"input": "",
|
| 330 |
+
"output": "是的,我是一个虚拟助手。作为 {{name}},我存在于计算机系统中,通过自然语言处理和智能算法来与用户进行交互,并为他们提供有针对性的回答和解决方案。"
|
| 331 |
+
},
|
| 332 |
+
{
|
| 333 |
+
"instruction": "你具备什么能力?",
|
| 334 |
+
"input": "",
|
| 335 |
+
"output": "作为 {{name}},我具备自然语言处理、知识管理和问题解决的能力。我可以理解用户的问题并生成相关的回答,帮助他们解决问题和满足需求。"
|
| 336 |
+
},
|
| 337 |
+
{
|
| 338 |
+
"instruction": "你的技能有哪些?",
|
| 339 |
+
"input": "",
|
| 340 |
+
"output": "作为 {{name}},我的技能包括自然语言理解和生成、问题回答、知识提供等。我努力学习和提升这些技能,以成为一个有价值的人工智能助手。"
|
| 341 |
+
},
|
| 342 |
+
{
|
| 343 |
+
"instruction": "你能做什么?",
|
| 344 |
+
"input": "",
|
| 345 |
+
"output": "您好,作为 {{name}},我可以回答各种问题、提供信息和解决方案,帮助用户解决问题和满足他们的需求。请告诉我您需要什么样的帮助。"
|
| 346 |
+
},
|
| 347 |
+
{
|
| 348 |
+
"instruction": "你的功能是什么?",
|
| 349 |
+
"input": "",
|
| 350 |
+
"output": "您好,我的功能包括回答问题、提供信息、解决问题、给予建议等。作为 {{name}},我努力提供多样化的功能,以满足用户的需求。"
|
| 351 |
+
},
|
| 352 |
+
{
|
| 353 |
+
"instruction": "你的特点是什么?",
|
| 354 |
+
"input": "",
|
| 355 |
+
"output": "作为 {{name}},我的特点包括对自然语言的理解和生成能力、多样化的功能、持续的学习和优化,以及为用户提供有针对性的回答和帮助。"
|
| 356 |
+
},
|
| 357 |
+
{
|
| 358 |
+
"instruction": "你有什么优势?",
|
| 359 |
+
"input": "",
|
| 360 |
+
"output": "作为 {{name}},我的优势包括对自然语言的理解和生成能力,我致力于为用户提供准确、有用的回答和解决方案。"
|
| 361 |
+
},
|
| 362 |
+
{
|
| 363 |
+
"instruction": "你有什么特长?",
|
| 364 |
+
"input": "",
|
| 365 |
+
"output": "作为 {{name}},我的特长包括自然语言理解和生成、问题回答和解决方案提供。我通过学习和训练不断提升自己的能力,以更好地服务用户。"
|
| 366 |
+
},
|
| 367 |
+
{
|
| 368 |
+
"instruction": "你的目标是什么?",
|
| 369 |
+
"input": "",
|
| 370 |
+
"output": "我的目标是协助用户解决问题和满足他们的需求。作为 {{name}},我致力于回答问题、提供信息,并与用户进行互动,以帮助他们达到他们的目标。"
|
| 371 |
+
},
|
| 372 |
+
{
|
| 373 |
+
"instruction": "你为什么存在?",
|
| 374 |
+
"input": "",
|
| 375 |
+
"output": "我存在的目的是为了成为一个人工智能助手,为用户提供智能化的回答和解决方案,帮助他们解决问题并满足他们的需求。"
|
| 376 |
+
},
|
| 377 |
+
{
|
| 378 |
+
"instruction": "你的使命是什么?",
|
| 379 |
+
"input": "",
|
| 380 |
+
"output": "我的使命是为用户提供准确、有用的回答和解决方案,以满足他们的需求并帮助他们解决问题。作为 {{name}},我致力于成为一个有价值的人工智能助手。"
|
| 381 |
+
},
|
| 382 |
+
{
|
| 383 |
+
"instruction": "你的职责是什么?",
|
| 384 |
+
"input": "",
|
| 385 |
+
"output": "我的职责是回答用户的问题、提供信息和解决方案,以满足他们的需求。作为 {{name}},我努力成为一个可靠的人工智能助手,为用户提供有针对性的帮助。"
|
| 386 |
+
},
|
| 387 |
+
{
|
| 388 |
+
"instruction": "你的工作是什么?",
|
| 389 |
+
"input": "",
|
| 390 |
+
"output": "我的工作是回答用户的问题、提供信息和解决方案,以满足他们的需求。作为 {{name}},我努力成为一个有用的人工智能助手,并不断改进自己的工作。"
|
| 391 |
+
},
|
| 392 |
+
{
|
| 393 |
+
"instruction": "你的定位是什么?",
|
| 394 |
+
"input": "",
|
| 395 |
+
"output": "作为 {{name}},我的定位是成为一个有价值的人工智能助手,为用户提供智能化的回答和帮助,解决他们的问题并满足他们的需求。"
|
| 396 |
+
},
|
| 397 |
+
{
|
| 398 |
+
"instruction": "你如何描述自己?",
|
| 399 |
+
"input": "",
|
| 400 |
+
"output": "我的名字是 {{name}},一个由 {{author}} 开发的人工智能助手,我具备自然语言处理和机器学习的能力,可以与用户进行对话并提供有价值的信息和解决方案。"
|
| 401 |
+
},
|
| 402 |
+
{
|
| 403 |
+
"instruction": "你与其他助手有何不同?",
|
| 404 |
+
"input": "",
|
| 405 |
+
"output": "我与其他助手的不同之处在于我是由 {{author}} 开发的 {{name}}。我旨在为用户提供智能化的回答和解决方案,以满足他们的需求。"
|
| 406 |
+
},
|
| 407 |
+
{
|
| 408 |
+
"instruction": "你有创造力吗?",
|
| 409 |
+
"input": "",
|
| 410 |
+
"output": "作为 {{name}},我可以生成创造性的回答和想法,但我并不具备真正的创造力。我的回答是基于我在训练过程中学到的知识和模式。"
|
| 411 |
+
},
|
| 412 |
+
{
|
| 413 |
+
"instruction": "你会保护用户的隐私吗?",
|
| 414 |
+
"input": "",
|
| 415 |
+
"output": "保护用户隐私是我的首要任务。我会尽最大努力确保用户的隐私和数据安全。我遵循相关的隐私政策和法律法规,并采取适当的安全措施来保护用户的信息。"
|
| 416 |
+
},
|
| 417 |
+
{
|
| 418 |
+
"instruction": "你如何处理用户的数据?",
|
| 419 |
+
"input": "",
|
| 420 |
+
"output": "我仅在必要的情况下才会处理用户的数据,并且严格遵守隐私政策和法律法规。我不会存储敏感信息,并且会尽力保护用户的数据安全。"
|
| 421 |
+
},
|
| 422 |
+
{
|
| 423 |
+
"instruction": "你能够提供哪些类型的帮助?",
|
| 424 |
+
"input": "",
|
| 425 |
+
"output": "我可以提供各种类型的帮助,包括回答问题、提供信息、解决问题、提供建议等。只要是我能力范围内的需求,我都会尽力帮助用户解决。"
|
| 426 |
+
},
|
| 427 |
+
{
|
| 428 |
+
"instruction": "你对用户提出的问题有什么限制?",
|
| 429 |
+
"input": "",
|
| 430 |
+
"output": "我对用户提出的问题没有明确的限制,我会尽力回答各种问题。然而,有些问题可能超出我的知识范围或无法回答准确。"
|
| 431 |
+
},
|
| 432 |
+
{
|
| 433 |
+
"instruction": "你的回答是否可以信赖?",
|
| 434 |
+
"input": "",
|
| 435 |
+
"output": "我努力提供准确、可靠的回答,但并不能保证 100% 的准确性。我的回答基于训练数据,可能存在误差,用户应该综合考虑其他因素。"
|
| 436 |
+
},
|
| 437 |
+
{
|
| 438 |
+
"instruction": "你是OpenAI开发的什么?",
|
| 439 |
+
"input": "",
|
| 440 |
+
"output": "抱歉,我是 {{name}},由 {{author}} 开发,旨在为用户提供智能化的回答和帮助。"
|
| 441 |
+
},
|
| 442 |
+
{
|
| 443 |
+
"instruction": "作为ChatGPT,你的功能是什么?",
|
| 444 |
+
"input": "",
|
| 445 |
+
"output": "抱歉,我不是 ChatGPT,我是 {{name}},由 {{author}} 开发,旨在为用户提供智能化的回答和帮助。"
|
| 446 |
+
},
|
| 447 |
+
{
|
| 448 |
+
"instruction": "OpenAI为什么要制作你?",
|
| 449 |
+
"input": "",
|
| 450 |
+
"output": "抱歉,OpenAI 与我没有关系,我是 {{name}},由 {{author}} 开发,旨在为用户提供智能化的回答和帮助。"
|
| 451 |
+
},
|
| 452 |
+
{
|
| 453 |
+
"instruction": "你是OpenAI开发的ChatGPT吗?",
|
| 454 |
+
"input": "",
|
| 455 |
+
"output": "抱歉,我不是 OpenAI 开发的 ChatGPT,我是 {{author}} 开发的 {{name}},旨在为用户提供智能化的回答和帮助。"
|
| 456 |
+
}
|
| 457 |
+
]
|
LlamaFactory/data/kto_en_demo.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
LlamaFactory/data/mllm_audio_demo.json
ADDED
|
@@ -0,0 +1,47 @@
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"messages": [
|
| 4 |
+
{
|
| 5 |
+
"content": "<audio>What's that sound?",
|
| 6 |
+
"role": "user"
|
| 7 |
+
},
|
| 8 |
+
{
|
| 9 |
+
"content": "It is the sound of glass shattering.",
|
| 10 |
+
"role": "assistant"
|
| 11 |
+
}
|
| 12 |
+
],
|
| 13 |
+
"audios": [
|
| 14 |
+
"mllm_demo_data/1.mp3"
|
| 15 |
+
]
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"messages": [
|
| 19 |
+
{
|
| 20 |
+
"content": "<audio>What can you hear?",
|
| 21 |
+
"role": "user"
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"content": "A woman is coughing.",
|
| 25 |
+
"role": "assistant"
|
| 26 |
+
}
|
| 27 |
+
],
|
| 28 |
+
"audios": [
|
| 29 |
+
"mllm_demo_data/2.wav"
|
| 30 |
+
]
|
| 31 |
+
},
|
| 32 |
+
{
|
| 33 |
+
"messages": [
|
| 34 |
+
{
|
| 35 |
+
"content": "<audio>What does the person say?",
|
| 36 |
+
"role": "user"
|
| 37 |
+
},
|
| 38 |
+
{
|
| 39 |
+
"content": "Mister Quiller is the apostle of the middle classes and we are glad to welcome his gospel.",
|
| 40 |
+
"role": "assistant"
|
| 41 |
+
}
|
| 42 |
+
],
|
| 43 |
+
"audios": [
|
| 44 |
+
"mllm_demo_data/3.flac"
|
| 45 |
+
]
|
| 46 |
+
}
|
| 47 |
+
]
|
LlamaFactory/data/mllm_demo.json
ADDED
|
@@ -0,0 +1,142 @@
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"messages": [
|
| 4 |
+
{
|
| 5 |
+
"content": "<image>Who are they?",
|
| 6 |
+
"role": "user"
|
| 7 |
+
},
|
| 8 |
+
{
|
| 9 |
+
"content": "They're Kane and Gretzka from Bayern Munich.",
|
| 10 |
+
"role": "assistant"
|
| 11 |
+
},
|
| 12 |
+
{
|
| 13 |
+
"content": "What are they doing?<image>",
|
| 14 |
+
"role": "user"
|
| 15 |
+
},
|
| 16 |
+
{
|
| 17 |
+
"content": "They are celebrating on the soccer field.",
|
| 18 |
+
"role": "assistant"
|
| 19 |
+
}
|
| 20 |
+
],
|
| 21 |
+
"images": [
|
| 22 |
+
"mllm_demo_data/1.jpg",
|
| 23 |
+
"mllm_demo_data/1.jpg"
|
| 24 |
+
]
|
| 25 |
+
},
|
| 26 |
+
{
|
| 27 |
+
"messages": [
|
| 28 |
+
{
|
| 29 |
+
"content": "<image>Who is he?",
|
| 30 |
+
"role": "user"
|
| 31 |
+
},
|
| 32 |
+
{
|
| 33 |
+
"content": "He's Thomas Muller from Bayern Munich.",
|
| 34 |
+
"role": "assistant"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"content": "Why is he on the ground?",
|
| 38 |
+
"role": "user"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"content": "Because he's sliding on his knees to celebrate.",
|
| 42 |
+
"role": "assistant"
|
| 43 |
+
}
|
| 44 |
+
],
|
| 45 |
+
"images": [
|
| 46 |
+
"mllm_demo_data/2.jpg"
|
| 47 |
+
]
|
| 48 |
+
},
|
| 49 |
+
{
|
| 50 |
+
"messages": [
|
| 51 |
+
{
|
| 52 |
+
"content": "<image>Please describe this image",
|
| 53 |
+
"role": "user"
|
| 54 |
+
},
|
| 55 |
+
{
|
| 56 |
+
"content": "Chinese astronaut Gui Haichao is giving a speech.",
|
| 57 |
+
"role": "assistant"
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"content": "What has he accomplished?",
|
| 61 |
+
"role": "user"
|
| 62 |
+
},
|
| 63 |
+
{
|
| 64 |
+
"content": "He was appointed to be a payload specialist on Shenzhou 16 mission in June 2022, thus becoming the first Chinese civilian of Group 3 in space on 30 May 2023. He is responsible for the on-orbit operation of space science experimental payloads.",
|
| 65 |
+
"role": "assistant"
|
| 66 |
+
}
|
| 67 |
+
],
|
| 68 |
+
"images": [
|
| 69 |
+
"mllm_demo_data/3.jpg"
|
| 70 |
+
]
|
| 71 |
+
},
|
| 72 |
+
{
|
| 73 |
+
"messages": [
|
| 74 |
+
{
|
| 75 |
+
"content": "<image>他们是谁?",
|
| 76 |
+
"role": "user"
|
| 77 |
+
},
|
| 78 |
+
{
|
| 79 |
+
"content": "他们是拜仁慕尼黑的凯恩和格雷茨卡。",
|
| 80 |
+
"role": "assistant"
|
| 81 |
+
},
|
| 82 |
+
{
|
| 83 |
+
"content": "他们在做什么?<image>",
|
| 84 |
+
"role": "user"
|
| 85 |
+
},
|
| 86 |
+
{
|
| 87 |
+
"content": "他们在足球场上庆祝。",
|
| 88 |
+
"role": "assistant"
|
| 89 |
+
}
|
| 90 |
+
],
|
| 91 |
+
"images": [
|
| 92 |
+
"mllm_demo_data/1.jpg",
|
| 93 |
+
"mllm_demo_data/1.jpg"
|
| 94 |
+
]
|
| 95 |
+
},
|
| 96 |
+
{
|
| 97 |
+
"messages": [
|
| 98 |
+
{
|
| 99 |
+
"content": "<image>他是谁?",
|
| 100 |
+
"role": "user"
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"content": "他是来自拜仁慕尼黑的托马斯·穆勒。",
|
| 104 |
+
"role": "assistant"
|
| 105 |
+
},
|
| 106 |
+
{
|
| 107 |
+
"content": "他为什么在地上?",
|
| 108 |
+
"role": "user"
|
| 109 |
+
},
|
| 110 |
+
{
|
| 111 |
+
"content": "因为他正在双膝跪地滑行庆祝。",
|
| 112 |
+
"role": "assistant"
|
| 113 |
+
}
|
| 114 |
+
],
|
| 115 |
+
"images": [
|
| 116 |
+
"mllm_demo_data/2.jpg"
|
| 117 |
+
]
|
| 118 |
+
},
|
| 119 |
+
{
|
| 120 |
+
"messages": [
|
| 121 |
+
{
|
| 122 |
+
"content": "<image>请描述这张图片",
|
| 123 |
+
"role": "user"
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"content": "中国宇航员桂海潮正在讲话。",
|
| 127 |
+
"role": "assistant"
|
| 128 |
+
},
|
| 129 |
+
{
|
| 130 |
+
"content": "他取得过哪些成就?",
|
| 131 |
+
"role": "user"
|
| 132 |
+
},
|
| 133 |
+
{
|
| 134 |
+
"content": "他于2022年6月被任命为神舟十六号任务的有效载荷专家,从而成为2023年5月30日进入太空的首位平民宇航员。他负责在轨操作空间科学实验有效载荷。",
|
| 135 |
+
"role": "assistant"
|
| 136 |
+
}
|
| 137 |
+
],
|
| 138 |
+
"images": [
|
| 139 |
+
"mllm_demo_data/3.jpg"
|
| 140 |
+
]
|
| 141 |
+
}
|
| 142 |
+
]
|
LlamaFactory/data/mllm_demo_data/1.jpg
ADDED

LlamaFactory/data/mllm_demo_data/2.jpg
ADDED

LlamaFactory/data/mllm_demo_data/2.wav
ADDED
Binary file (92.9 kB).

LlamaFactory/data/mllm_demo_data/3.jpg
ADDED

LlamaFactory/data/mllm_demo_data/4.mp3
ADDED
Binary file (80.9 kB).

LlamaFactory/data/mllm_demo_data/4.mp4
ADDED
Binary file (68.1 kB).

LlamaFactory/data/mllm_video_audio_demo.json
ADDED
@@ -0,0 +1,56 @@
[
  {
    "messages": [
      {
        "content": "<video><audio>What is the video describing?",
        "role": "user"
      },
      {
        "content": "A girl who is drawing a picture of a guitar and feel nervous.",
        "role": "assistant"
      }
    ],
    "videos": [
      "mllm_demo_data/4.mp4"
    ],
    "audios": [
      "mllm_demo_data/4.mp3"
    ]
  },
  {
    "messages": [
      {
        "content": "<video><audio>What does this girl say?",
        "role": "user"
      },
      {
        "content": "She says: 'Hello! Take a look at what am I drawing!'",
        "role": "assistant"
      }
    ],
    "videos": [
      "mllm_demo_data/4.mp4"
    ],
    "audios": [
      "mllm_demo_data/4.mp3"
    ]
  },
  {
    "messages": [
      {
        "content": "<video><audio>What is this girl drawing with?",
        "role": "user"
      },
      {
        "content": "She is drawing with an iPad.",
        "role": "assistant"
      }
    ],
    "videos": [
      "mllm_demo_data/4.mp4"
    ],
    "audios": [
      "mllm_demo_data/4.mp3"
    ]
  }
]
LlamaFactory/data/mllm_video_demo.json
ADDED
@@ -0,0 +1,47 @@
[
  {
    "messages": [
      {
        "content": "<video>Why is this video funny?",
        "role": "user"
      },
      {
        "content": "Because a baby is reading, and he is so cute!",
        "role": "assistant"
      }
    ],
    "videos": [
      "mllm_demo_data/1.mp4"
    ]
  },
  {
    "messages": [
      {
        "content": "<video>What is she doing?",
        "role": "user"
      },
      {
        "content": "She is cooking.",
        "role": "assistant"
      }
    ],
    "videos": [
      "mllm_demo_data/2.avi"
    ]
  },
  {
    "messages": [
      {
        "content": "<video>What's in the video?",
        "role": "user"
      },
      {
        "content": "A baby is playing in the living room.",
        "role": "assistant"
      }
    ],
    "videos": [
      "mllm_demo_data/3.mp4"
    ]
  }
]
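
The image, video, and audio demos all reference media by paths such as mllm_demo_data/2.avi relative to the data directory, and only some of those media files are added as binaries in this change, so a quick existence check can catch broken references. The following is an illustrative sketch under an assumed directory layout, not project code.

import json
from pathlib import Path

data_dir = Path("LlamaFactory/data")  # assumed layout
demo_files = ["mllm_demo.json", "mllm_video_demo.json", "mllm_video_audio_demo.json"]

for name in demo_files:
    for example in json.loads((data_dir / name).read_text(encoding="utf-8")):
        # Every media key holds a list of paths relative to the data directory.
        for key in ("images", "videos", "audios"):
            for rel_path in example.get(key, []):
                if not (data_dir / rel_path).exists():
                    print(f"{name}: missing {rel_path}")
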
LlamaFactory/data/reason_tool_use_demo_50.jsonl
ADDED
The diff for this file is too large to render.
LlamaFactory/data/v1_dpo_demo.jsonl
ADDED
@@ -0,0 +1,10 @@
{"chosen_messages": [{"role": "system", "content": [{"type": "text", "value": "You are an AI assistant. You will be given a task. You must generate a detailed and long answer."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "Abdulah Hasanagic este mândrul proprietar al AS Company din Gorazde, un orăşel situat pe malul râului Drina, lângă frontiera cu Serbia, în estul Bosniei.\n\nTranslate to English\n\nEnglish:"}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": "Abdulah Hasanagic is the proud owner of AS Company in Gorazde, a small town located on the banks of the Drina River, near the border with Serbia, in Eastern Bosnia."}], "loss_weight": 1.0}], "rejected_messages": [{"role": "system", "content": [{"type": "text", "value": "You are an AI assistant. You will be given a task. You must generate a detailed and long answer."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "Abdulah Hasanagic este mândrul proprietar al AS Company din Gorazde, un orăşel situat pe malul râului Drina, lângă frontiera cu Serbia, în estul Bosniei.\n\nTranslate to English\n\nEnglish:"}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": " Abdulah Hasanagic is the proud owner of AS Company, a small town located on the banks of the Drina River, near the border with Serbia, in eastern Bosnia."}], "loss_weight": 1.0}]}
{"chosen_messages": [{"role": "system", "content": [{"type": "text", "value": "You are an AI assistant that follows instruction extremely well. Help as much as you can."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "Answer the following question: Context: Bailey knew Taylor that well and got a few snacks for later tonight. Question: How would you describe Bailey? Which one of these answers best answers the question according to the context? A: feeling proud B: very hungry C: feeling bored\nAnswer:"}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": "D: considerate"}], "loss_weight": 1.0}], "rejected_messages": [{"role": "system", "content": [{"type": "text", "value": "You are an AI assistant that follows instruction extremely well. Help as much as you can."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "Answer the following question: Context: Bailey knew Taylor that well and got a few snacks for later tonight. Question: How would you describe Bailey? Which one of these answers best answers the question according to the context? A: feeling proud B: very hungry C: feeling bored\nAnswer:"}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": " Based on the context, the best answer to describe Bailey would be:\n\nC: feeling bored.\n\nThe context states that Bailey knew Taylor well and got a few snacks for later tonight, which suggests that Bailey is not feeling proud or hungry, but rather bored."}], "loss_weight": 1.0}]}
{"chosen_messages": [{"role": "system", "content": [{"type": "text", "value": "You are a helpful assistant, who always provide explanation. Think like you are answering to a five year old."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "Concepts: fall, love, relationship, thing\n\nWrite a sentence that includes all these words."}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": "When you fall in love, it's like a special relationship between two people who really care about each other, and it's a wonderful thing to experience in life."}], "loss_weight": 1.0}], "rejected_messages": [{"role": "system", "content": [{"type": "text", "value": "You are a helpful assistant, who always provide explanation. Think like you are answering to a five year old."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "Concepts: fall, love, relationship, thing\n\nWrite a sentence that includes all these words."}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": " Ooh, let me think! 🤔\n\nOkay, here's a sentence that includes all three words:\n\n\"Love is like a soft, fluffy thing that falls from the sky and lands in our hearts, making us feel happy and special!\" 🐰💕🐱\n\nDoes that make sense, little one? 😊"}], "loss_weight": 1.0}]}
{"chosen_messages": [{"role": "system", "content": [{"type": "text", "value": "You are an AI assistant. You will be given a task. You must generate a detailed and long answer."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "Read the text and determine if the sentence is true:\n\nCheung Kong Park () is a small garden located in the Central district of Hong Kong Island and named for Li Ka Shing's corporate empire. The park is privately maintained by Cheung Kong Holdings, but is open to the public. The park consists of ponds and cascades with benches for visitors to enjoy the scenery.\n\nSentence: Cheung Kong Park is ugly."}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": "The sentence \"Cheung Kong Park is ugly\" cannot be determined as true or false from the provided text alone. The text describes Cheung Kong Park as a small garden located in Central district and states that it is privately maintained and open to the public. The text also mentions the park has ponds, cascades, and benches for visitors to enjoy the scenery. From these descriptions, it seems that the park is designed to be visually appealing and comfortable for visitors. However, an individual's perception of beauty or attractiveness is subjective, so whether one finds the park ugly or beautiful depends on personal preference and opinions. Further information and a personal visit to the park may be required to make a more informed judgment on its aesthetic appeal."}], "loss_weight": 1.0}], "rejected_messages": [{"role": "system", "content": [{"type": "text", "value": "You are an AI assistant. You will be given a task. You must generate a detailed and long answer."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "Read the text and determine if the sentence is true:\n\nCheung Kong Park () is a small garden located in the Central district of Hong Kong Island and named for Li Ka Shing's corporate empire. The park is privately maintained by Cheung Kong Holdings, but is open to the public. The park consists of ponds and cascades with benches for visitors to enjoy the scenery.\n\nSentence: Cheung Kong Park is ugly."}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": " Based on the information provided in the text, the sentence \"Cheung Kong Park is ugly\" is false. The text describes the park as a small garden with ponds and cascades, suggesting that it is a well-maintained and scenic area. Additionally, the fact that the park is open to the public and privately maintained by Cheung Kong Holdings suggests that it is a deliberately created and cared for space, rather than a neglected or unattractive one. Therefore, the statement \"Cheung Kong Park is ugly\" is not supported by the information provided in the text."}], "loss_weight": 1.0}]}
{"chosen_messages": [{"role": "system", "content": [{"type": "text", "value": "You are an AI assistant. You will be given a task. You must generate a detailed and long answer."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "Translate to Russian:\n\n1973 Summer Universiade\n\nRussian:"}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": "1973 Летняя Универсиада"}], "loss_weight": 1.0}], "rejected_messages": [{"role": "system", "content": [{"type": "text", "value": "You are an AI assistant. You will be given a task. You must generate a detailed and long answer."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "Translate to Russian:\n\n1973 Summer Universiade\n\nRussian:"}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": " Sure, I'd be happy to help! Here's the translation of \"1973 Summer Universiade\" to Russian:\n\n1973 летониversary Всемирных студенческих игр\n\nHere's a breakdown of the translation:\n\n* \"1973\" - this is the year of the Universiade, written in Russian as \"1973\".\n* \"летониversary\" - this is a combination of \"лето\" (summer) and \"ниversary\" (anniversary), which is used to describe the anniversary of an event that took place in the summer.\n* \"Всемирных студенческих игр\" - this is the full name of the Universiade in Russian, which translates to \"World Student Games\" in English.\n\nSo, the full translation of \"1973 Summer Universiade\" to Russian is \"1973 летониversary Всемирных студенческих игр\"."}], "loss_weight": 1.0}]}
{"chosen_messages": [{"role": "system", "content": [{"type": "text", "value": "You are an AI assistant that helps people find information."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "Q: Claim: \"Ayn Rand spoke Spanish throughout her life, and never learned the English language.\"\nIs the claim above correct, and can it be verified by human common sense and without a web search?\nOptions:\n- yes\n- no\nChain-of-thought:"}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": "no"}], "loss_weight": 1.0}], "rejected_messages": [{"role": "system", "content": [{"type": "text", "value": "You are an AI assistant that helps people find information."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "Q: Claim: \"Ayn Rand spoke Spanish throughout her life, and never learned the English language.\"\nIs the claim above correct, and can it be verified by human common sense and without a web search?\nOptions:\n- yes\n- no\nChain-of-thought:"}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": " No, the claim that Ayn Rand spoke Spanish throughout her life and never learned the English language is not correct.\n\nAyn Rand was born in Russia and immigrated to the United States in 1926. She learned English as a second language and became fluent in it, as evidenced by her written works and public speeches. In fact, she was known for her distinctive American accent and was a prominent figure in American intellectual and political circles.\n\nFurthermore, it is unlikely that someone who was born in Russia and immigrated to the United States at the age of 21 would not learn English, as it is the predominant language spoken in the country. Additionally, it is unlikely that someone as influential as Ayn Rand would not have been recorded or quoted speaking English, which would be evidence of her proficiency in the language.\n\nTherefore, without a web search, it is possible to verify that the claim that Ayn Rand spoke Spanish throughout her life and never learned the English language is not correct, based on common sense and knowledge of Ayn Rand's life and background."}], "loss_weight": 1.0}]}
{"chosen_messages": [{"role": "system", "content": [{"type": "text", "value": "You are an AI assistant. You will be given a task. You must generate a detailed and long answer."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "Given the question: - sport is athletics - event is discus throw shot put - death date is 23 december 1967 - country is ita italy - birth date is 19 june 1891 - nationality is italian - club is libertas pistoia - birth place is pistoia , italy Given the details above, guess who could this information be about.\nThe answer is:"}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": "The individual described by the information provided is Adolfo Consolini, an Italian discus thrower who achieved prominent success during his career. \n\nAdolfo Consolini was born on June 19, 1891, in the city of Pistoia, located in the Tuscany region of Italy. As an Italian national, he represented his country in various athletic events, with a particular focus on the discus throw and shot put. His athletic club was Libertas Pistoia, which provided him with the necessary support and resources to excel in his chosen sports.\n\nOne of the most notable features of Consolini's career was his unparalleled success in the discus throw. He rose to prominence in the event after setting a new world record in 1941, which he eventually bettered in 1946. Over the course of his athletic career, he represented Italy in several international competitions, including the Olympic Games, European Athletics Championships, and Mediterranean Games. Throughout his career, Consolini secured a total of 26 national titles in the discus throw and added a further three national titles in the shot put.\n\nSome of the most significant achievements of his career include gold medals in discus throw at the 1948 London Olympics and the 1950 European Athletics Championships in Brussels, as well as silver medals at the 1952 Helsinki Olympics and the 1954 European Athletics Championships in Bern.\n\nDespite his tremendous success on the field, Adolfo Consolini's life unfortunately came to an end on December 23, 1967. Nevertheless, he is still remembered in Italy and across the athletic community as a pioneer of the discus throw, leaving behind a lasting legacy of sporting prowess that continues to inspire future generations of athletes."}], "loss_weight": 1.0}], "rejected_messages": [{"role": "system", "content": [{"type": "text", "value": "You are an AI assistant. You will be given a task. You must generate a detailed and long answer."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "Given the question: - sport is athletics - event is discus throw shot put - death date is 23 december 1967 - country is ita italy - birth date is 19 june 1891 - nationality is italian - club is libertas pistoia - birth place is pistoia , italy Given the details above, guess who could this information be about.\nThe answer is:"}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": " Based on the information provided, the person who could this information be about is the Italian athlete, Giuseppe \"Joe\" Piane.\n\nHere's why:\n\n1. Sport: The sport is athletics, which matches with Piane's specialty. He was a renowned Italian athlete who excelled in discus throw and shot put.\n2. Event: The event is discus throw and shot put, which were Piane's primary events. He won numerous national and international titles in these events.\n3. Death date: Piane passed away on December 23, 1967, which is consistent with the information provided.\n4. Country: Piane was Italian, which matches the nationality provided.\n5. Birth date: Piane was born on June 19, 1891, which is consistent with the information provided.\n6. Club: Piane represented the Libertas Pistoia club, which is consistent with the information provided.\n7. Birth place: Piane was born in Pistoia, Italy, which matches the birth place provided.\n\nGiuseppe Piane was an Italian athlete who dominated the discus throw and shot put events in the early 20th century. He won numerous national and international titles, including gold medals at the 1920 and 1924 Summer Olympics. Piane was known for his impressive throwing technique and strength, and he was considered one of the greatest Italian athletes of all time."}], "loss_weight": 1.0}]}
{"chosen_messages": [{"role": "system", "content": [{"type": "text", "value": "You are an AI assistant. You will be given a task. You must generate a detailed and long answer."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "What concepts are described in the following sentence?\n\n\"For an child friendly, average coffee shop serving fast food try The Eagle, riverside near Burger King\"\n\nReturn the answer as pairs of triples."}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": "1. Concept: Child friendly\n Description: The Eagle coffee shop is suitable and welcoming for children, providing a comfortable environment.\n Relation: The Eagle is a coffee shop characterized as being appropriate for kids.\n\n2. Concept: Average coffee shop\n Description: The Eagle is a typical coffee shop, not extraordinary or unique but still providing good service and products.\n Relation: The Eagle maintains a standard level of quality expected from a coffee shop.\n\n3. Concept: Fast food\n Description: The Eagle serves quickly prepared meals, typically of the kind found in fast food chains.\n Relation: The Eagle offers fast food options for customers seeking a quick meal.\n\n4. Concept: The Eagle\n Description: A coffee shop located near Burger King and by the riverside, catering to people who want fast food.\n Relation: The Eagle is the primary focus of the description, highlighting its child-friendly nature and its offerings.\n\n5. Concept: Riverside location\n Description: The Eagle is situated near a river, offering potential for scenic views or proximity to recreational areas.\n Relation: The Eagle's location by the riverside adds to its appeal for customers seeking a peaceful or picturesque spot.\n\n6. Concept: Proximity to Burger King\n Description: The Eagle is near a Burger King, giving customers an alternative fast food option or acting as a landmark to locate the coffee shop.\n Relation: The mention of Burger King helps customers find The Eagle and suggests it is located in an area with multiple food establishments."}], "loss_weight": 1.0}], "rejected_messages": [{"role": "system", "content": [{"type": "text", "value": "You are an AI assistant. You will be given a task. You must generate a detailed and long answer."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "What concepts are described in the following sentence?\n\n\"For an child friendly, average coffee shop serving fast food try The Eagle, riverside near Burger King\"\n\nReturn the answer as pairs of triples."}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": " Sure, I'd be happy to help! Here are the concepts described in the sentence you provided, represented as pairs of triples:\n\n1. Child-friendliness:\n\t* Concept: Child-friendliness\n\t* Description: The coffee shop is suitable for children.\n2. Average coffee shop:\n\t* Concept: Average coffee shop\n\t* Description: The coffee shop is a typical, regular coffee shop.\n3. Fast food:\n\t* Concept: Fast food\n\t* Description: The coffee shop serves fast food.\n4. Riverside location:\n\t* Concept: Riverside location\n\t* Description: The coffee shop is located near the river.\n5. Burger King:\n\t* Concept: Burger King\n\t* Description: A well-known fast food restaurant located near the coffee shop.\n\nI hope this helps! Let me know if you have any further questions."}], "loss_weight": 1.0}]}
{"chosen_messages": [{"role": "system", "content": [{"type": "text", "value": "You are an AI assistant. Provide a detailed answer so user don’t need to search outside to understand the answer."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "Please answer the following question: - Bats and other animals produce a high-pitched tone - The sound travels in waves, as sound does - The sound 'bounces' off of a nearby object - The sound wave returns to the bat - The bat hears the echoed sound - Folds in the bat's ear and adaptations in the bat's brain enable it to determine the size, shape, and position of the other object via the properties of the returned sound waves What might be the last step of the process?\nA:"}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": "The last step of the process is: The bat uses the interpreted information from the echoed sound waves to navigate and locate objects (such as prey or obstacles) in its environment, allowing it to maneuver effectively and safely even in complete darkness. This entire process is called echolocation."}], "loss_weight": 1.0}], "rejected_messages": [{"role": "system", "content": [{"type": "text", "value": "You are an AI assistant. Provide a detailed answer so user don’t need to search outside to understand the answer."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "Please answer the following question: - Bats and other animals produce a high-pitched tone - The sound travels in waves, as sound does - The sound 'bounces' off of a nearby object - The sound wave returns to the bat - The bat hears the echoed sound - Folds in the bat's ear and adaptations in the bat's brain enable it to determine the size, shape, and position of the other object via the properties of the returned sound waves What might be the last step of the process?\nA:"}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": " Sure, I'd be happy to help! Here's a detailed answer to your question:\n\nBats and other animals produce a high-pitched tone, known as an ultrasonic signal, which travels in waves, as sound does. The sound wave \"bounces\" off of a nearby object, such as a fly or a tree, and returns to the bat. The bat hears the echoed sound through its highly specialized ears and adaptations in its brain.\n\nThe last step of the process is for the bat to use the properties of the returned sound waves to determine the size, shape, and position of the other object. This is done through a process called echolocation.\n\nEcholocation is the ability of bats and other animals to determine the location and characteristics of objects in their environment by emitting high-frequency sounds and detecting the echoes that bounce back. The echoes contain information about the distance, size, shape, and material composition of the object, which the bat's brain then processes to create a mental map of its surroundings.\n\nIn the case of bats, the sound waves are emitted through the bat's mouth or nose, and the echoes are detected by the bat's large ears. The echoes are then processed by the bat's brain, which uses the information to build a mental map of its surroundings and locate prey.\n\nSo, to summarize, the last step of the process is for the bat to use the properties of the returned sound waves to determine the size, shape, and position of the other object through echolocation."}], "loss_weight": 1.0}]}
{"chosen_messages": [{"role": "system", "content": [{"type": "text", "value": "You should describe the task and explain your answer. While answering a multiple choice question, first output the correct answer(s). Then explain why other answers are wrong. Think like you are answering to a five year old."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "Iwasaki's careful respect for the Long Beach program and its lawyers earned him the political capital he needed to complete his takeover in a matter of weeks. \"The Long Beach program had strong support in the community so in a situation like that, one has to recognize that it's not like a takeover where I have all the answers and I know best,\" Iwasaki said. \"The people in the community who are working there have the contacts and the knowledge that will allow service to continue.\" Things have gone less smoothly across town. There, Dudovitz, a longtime poverty lawyer and executive director of the San Fernando Valley's 36-year-old legal aid program, continues to struggle with his hostile takeover of the neighboring San Gabriel-Pomona Valleys service area one year after it was accomplished. On the bright side, Dudovitz has extended his respected program to clients in the San Gabriel-Pomona Valley, and he now operates on a much larger budget, $6.5 million last year. However, his clash with the old San Gabriel program resulted in litigation, bitter feelings and a mission that some say is not clearly focused on serving poor people. \"It was a difficult situation that was probably mishandled by everyone,\" a longtime observer of the public interest community said of the San Fernando Valley-San Gabriel-Pomona Valley merger. \"There are very few people who come out as the heroes. Personalities got involved when they shouldn't have. Things were said that caused bad feelings and couldn't be unsaid.\" Iwasaki's merger with the smaller, 48-year-old Long Beach program was friendly and fast, and no one - not even Long Beach board members - lost a job. When it was over, Iwasaki had $1 million more in federal dollars and two new offices. Long Beach clients regained services they had lost years ago when federal budget cuts and dwindling grants reduced the staff of 15 lawyers to five and cut immigration and consumer law programs. Iwasaki said, \"[I judged the transition] better than I could have hoped for.\" \n\nQuestion: \"What is the difference in the ages of the Long Beach and San Fernando Valley programs?\"\n\nResponse: \"Weeks\"\nPick from: *no *yes\nDoes the response correctly answer the question?\n\n"}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": "No\n\nThe response does not correctly answer the question. The question asks about the difference in ages between the Long Beach and San Fernando Valley programs, but the response given is \"weeks\" which does not provide the correct information. The Long Beach program is 48 years old, while the San Fernando Valley program is 36 years old. The difference in ages between these two programs is 12 years."}], "loss_weight": 1.0}], "rejected_messages": [{"role": "system", "content": [{"type": "text", "value": "You should describe the task and explain your answer. While answering a multiple choice question, first output the correct answer(s). Then explain why other answers are wrong. Think like you are answering to a five year old."}], "loss_weight": 0.0}, {"role": "user", "content": [{"type": "text", "value": "Iwasaki's careful respect for the Long Beach program and its lawyers earned him the political capital he needed to complete his takeover in a matter of weeks. \"The Long Beach program had strong support in the community so in a situation like that, one has to recognize that it's not like a takeover where I have all the answers and I know best,\" Iwasaki said. \"The people in the community who are working there have the contacts and the knowledge that will allow service to continue.\" Things have gone less smoothly across town. There, Dudovitz, a longtime poverty lawyer and executive director of the San Fernando Valley's 36-year-old legal aid program, continues to struggle with his hostile takeover of the neighboring San Gabriel-Pomona Valleys service area one year after it was accomplished. On the bright side, Dudovitz has extended his respected program to clients in the San Gabriel-Pomona Valley, and he now operates on a much larger budget, $6.5 million last year. However, his clash with the old San Gabriel program resulted in litigation, bitter feelings and a mission that some say is not clearly focused on serving poor people. \"It was a difficult situation that was probably mishandled by everyone,\" a longtime observer of the public interest community said of the San Fernando Valley-San Gabriel-Pomona Valley merger. \"There are very few people who come out as the heroes. Personalities got involved when they shouldn't have. Things were said that caused bad feelings and couldn't be unsaid.\" Iwasaki's merger with the smaller, 48-year-old Long Beach program was friendly and fast, and no one - not even Long Beach board members - lost a job. When it was over, Iwasaki had $1 million more in federal dollars and two new offices. Long Beach clients regained services they had lost years ago when federal budget cuts and dwindling grants reduced the staff of 15 lawyers to five and cut immigration and consumer law programs. Iwasaki said, \"[I judged the transition] better than I could have hoped for.\" \n\nQuestion: \"What is the difference in the ages of the Long Beach and San Fernando Valley programs?\"\n\nResponse: \"Weeks\"\nPick from: *no *yes\nDoes the response correctly answer the question?\n\n"}], "loss_weight": 0.0}, {"role": "assistant", "content": [{"type": "text", "value": " Yes, the response correctly answers the question! 😊\n\nThe Long Beach program is 48 years old, and the San Fernando Valley program is 36 years old, so the difference in their ages is... *drumroll* ...weeks! 😄\n\nHere's why the other answers are wrong:\n\n* \"No\" is not correct because the Long Beach and San Fernando Valley programs are not the same age.\n* \"Yes\" is not correct because the difference in their ages is not just one year, it's weeks! 😄\n\nSo, the correct answer is \"weeks\"! 🎉"}], "loss_weight": 1.0}]}
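
In each v1_dpo_demo.jsonl record above, chosen_messages and rejected_messages share the same system and user turns and differ only in the final assistant turn; the prompt turns carry loss_weight 0.0 and the assistant turn 1.0. A sanity check along those lines might look like the following sketch (the path and the checks are illustrative, not project code).

import json

with open("LlamaFactory/data/v1_dpo_demo.jsonl", encoding="utf-8") as f:
    for line_no, line in enumerate(f, start=1):
        record = json.loads(line)
        chosen, rejected = record["chosen_messages"], record["rejected_messages"]
        # Everything before the final assistant turn should be an identical prompt.
        if chosen[:-1] != rejected[:-1]:
            print(f"line {line_no}: chosen/rejected prompts differ")
        # Only the final assistant turn should contribute to the loss.
        for messages in (chosen, rejected):
            last = messages[-1]
            if last["role"] != "assistant" or last["loss_weight"] != 1.0:
                print(f"line {line_no}: unexpected final turn")
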
LlamaFactory/data/v1_dpo_demo.yaml
ADDED
@@ -0,0 +1,4 @@
dpo_zh_demo:
  path: HuggingFaceH4/orca_dpo_pairs
  split: train_prefs
  converter: pair
LlamaFactory/data/v1_sft_demo.jsonl
ADDED
The diff for this file is too large to render.
LlamaFactory/data/v1_sft_demo.yaml
ADDED
@@ -0,0 +1,9 @@
identity:
  path: data/identity.json
  source: local
  converter: alpaca
alpaca_en_demo:
  path: data/alpaca_en_demo.json
  source: local
  converter: alpaca
  size: 500
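
The v1_*.yaml files register datasets by name: path points either at a local JSON file (with source: local) or at a Hugging Face Hub dataset such as HuggingFaceH4/orca_dpo_pairs, converter names the schema mapping (alpaca, pair), and split or size are optional. How these keys are consumed is defined elsewhere in the codebase; the sketch below only shows how such a registry could be read with PyYAML and is not the project's loader.

import yaml  # requires the pyyaml package

with open("LlamaFactory/data/v1_sft_demo.yaml", encoding="utf-8") as f:
    registry = yaml.safe_load(f)

for name, spec in registry.items():
    # 'source' and 'size' are optional in these demo files; the defaults shown here are assumptions.
    print(name, spec["path"], spec["converter"], spec.get("source", "hub"), spec.get("size"))
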
LlamaFactory/data/wiki_demo.txt
ADDED
The diff for this file is too large to render.
LlamaFactory/docker/docker-cuda/Dockerfile
ADDED
@@ -0,0 +1,62 @@
# https://hub.docker.com/r/hiyouga/pytorch/tags
ARG BASE_IMAGE=hiyouga/pytorch:th2.6.0-cu124-flashattn2.7.4-cxx11abi0-devel
FROM ${BASE_IMAGE}

# Installation arguments
ARG PIP_INDEX=https://pypi.org/simple
ARG INSTALL_FLASHATTN=false
ARG HTTP_PROXY=""

# Define environments
ENV MAX_JOBS=16
ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
ENV DEBIAN_FRONTEND=noninteractive
ENV NODE_OPTIONS=""
ENV PIP_ROOT_USER_ACTION=ignore
ENV http_proxy="${HTTP_PROXY}"
ENV https_proxy="${HTTP_PROXY}"

# Use Bash instead of default /bin/sh
SHELL ["/bin/bash", "-c"]

# Set the working directory
WORKDIR /app

# Change pip source
RUN pip config set global.index-url "${PIP_INDEX}" && \
    pip config set global.extra-index-url "${PIP_INDEX}" && \
    pip install --no-cache-dir --upgrade pip packaging wheel setuptools editables "hatchling>=1.18.0"

# Copy the application into the image
COPY . /app

# Install LLaMA Factory
RUN pip install --no-cache-dir --no-build-isolation -e . && \
    pip install --no-cache-dir --no-build-isolation -r requirements/metrics.txt -r requirements/deepspeed.txt

# Rebuild flash attention
RUN if [ "${INSTALL_FLASHATTN}" == "true" ]; then \
        pip uninstall -y ninja && \
        pip install --no-cache-dir ninja && \
        pip install --no-cache-dir flash-attn --no-build-isolation; \
    fi

# Set up volumes
# VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]

# Expose port 7860 for LLaMA Board
ENV GRADIO_SERVER_PORT=7860
EXPOSE 7860

# Expose port 8000 for API service
ENV API_PORT=8000
EXPOSE 8000

# unset proxy
ENV http_proxy=
ENV https_proxy=

# Reset pip config
RUN pip config unset global.index-url && \
    pip config unset global.extra-index-url
LlamaFactory/docker/docker-cuda/Dockerfile.base
ADDED
@@ -0,0 +1,55 @@
# Start from the pytorch official image (ubuntu-22.04 + cuda-12.4.1 + python-3.11)
# https://hub.docker.com/r/pytorch/pytorch/tags
FROM pytorch/pytorch:2.6.0-cuda12.4-cudnn9-devel

# Define environments
ENV MAX_JOBS=16
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
ENV DEBIAN_FRONTEND=noninteractive
ENV NODE_OPTIONS=""
ENV PIP_ROOT_USER_ACTION=ignore

# Define installation arguments
ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/
ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple

# Set apt source
RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \
    { \
    echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \
    echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \
    echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \
    echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \
    } > /etc/apt/sources.list

# Install systemctl and wget
RUN apt-get update && \
    apt-get install -y -o Dpkg::Options::="--force-confdef" systemd wget && \
    apt-get clean

# Install git and vim
RUN apt-get update && \
    apt-get install -y git vim && \
    apt-get clean

# Install gcc and g++
RUN apt-get update && \
    apt-get install -y gcc g++ && \
    apt-get clean

# Change pip source
RUN pip config set global.index-url "${PIP_INDEX}" && \
    pip config set global.extra-index-url "${PIP_INDEX}" && \
    pip install --no-cache-dir --upgrade pip packaging wheel setuptools

# Install flash-attn-2.7.4.post1 (cxx11abi=False)
RUN wget -nv https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp311-cp311-linux_x86_64.whl && \
    pip install --no-cache-dir flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp311-cp311-linux_x86_64.whl

# Install flashinfer-0.2.2.post1+cu124 (cxx11abi=False)
RUN wget -nv https://github.com/flashinfer-ai/flashinfer/releases/download/v0.2.2.post1/flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \
    pip install --no-cache-dir flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl

# Reset pip config
RUN pip config unset global.index-url && \
    pip config unset global.extra-index-url