rayraycano commited on
Commit
fcca8c8
·
verified ·
1 Parent(s): b809f37

Training in progress, step 20

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .bandit +3 -0
  2. .editorconfig +14 -0
  3. .flake8 +5 -0
  4. .gitattributes +8 -42
  5. .github/CODE_OF_CONDUCT.md +129 -0
  6. .github/CONTRIBUTING.md +76 -0
  7. .github/FUNDING.yml +13 -0
  8. .github/ISSUE_TEMPLATE/bug-report.yaml +113 -0
  9. .github/ISSUE_TEMPLATE/config.yml +7 -0
  10. .github/ISSUE_TEMPLATE/docs.yml +46 -0
  11. .github/ISSUE_TEMPLATE/feature-request.yaml +63 -0
  12. .github/PULL_REQUEST_TEMPLATE.md +27 -0
  13. .github/SECURITY.md +9 -0
  14. .github/SUPPORT.md +10 -0
  15. .github/release-drafter.yml +31 -0
  16. .github/workflows/base.yml +80 -0
  17. .github/workflows/docs.yml +34 -0
  18. .github/workflows/lint.yml +24 -0
  19. .github/workflows/main.yml +176 -0
  20. .github/workflows/multi-gpu-e2e.yml +73 -0
  21. .github/workflows/nightlies.yml +119 -0
  22. .github/workflows/precommit-autoupdate.yml +49 -0
  23. .github/workflows/pypi.yml +60 -0
  24. .github/workflows/tests-nightly.yml +139 -0
  25. .github/workflows/tests.yml +279 -0
  26. .gitignore +192 -0
  27. .isort.cfg +3 -0
  28. .mypy.ini +54 -0
  29. .pre-commit-config.yaml +45 -0
  30. .pylintrc +15 -0
  31. FAQS.md +7 -0
  32. LICENSE +202 -0
  33. MANIFEST.in +5 -0
  34. README.md +3 -56
  35. TODO.md +10 -0
  36. adapter_config.json +5 -5
  37. adapter_model.safetensors +1 -1
  38. checkpointing_lws.yml +650 -0
  39. cicd/Dockerfile.jinja +52 -0
  40. cicd/cicd.sh +12 -0
  41. cicd/multigpu.py +82 -0
  42. cicd/multigpu.sh +5 -0
  43. cicd/tests.py +82 -0
  44. config.py +53 -0
  45. deepspeed_config.yml +21 -0
  46. deepspeed_configs/zero1.json +23 -0
  47. deepspeed_configs/zero1_torch_compile.json +27 -0
  48. deepspeed_configs/zero2.json +27 -0
  49. deepspeed_configs/zero3.json +31 -0
  50. deepspeed_configs/zero3_bf16.json +22 -0
.bandit ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [bandit]
2
+ exclude = tests
3
+ skips = B101
.editorconfig ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ root = true
2
+
3
+ [*]
4
+ end_of_line = lf
5
+ insert_final_newline = true
6
+ trim_trailing_whitespace = true
7
+
8
+ [*.py]
9
+ indent_style = space
10
+ indent_size = 4
11
+
12
+ [**.yml]
13
+ indent_style = space
14
+ indent_size = 2
.flake8 ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ [flake8]
2
+ max-line-length = 88
3
+
4
+ select = C,E,F,W,B,B950
5
+ extend-ignore = E203, E501, W503
.gitattributes CHANGED
@@ -1,43 +1,9 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
36
  tokenizer.json filter=lfs diff=lfs merge=lfs -text
37
- blobs/09d433f650646834a83c580877bd60c6d1f88f7755305c12576b5c7058f9af15 filter=lfs diff=lfs merge=lfs -text
38
- blobs/2b1879f356aed350030bb40eb45ad362c89d9891096f79a3ab323d3ba5607668 filter=lfs diff=lfs merge=lfs -text
39
- blobs/92ecfe1a2414458b4821ac8c13cf8cb70aed66b5eea8dc5ad9eeb4ff309d6d7b filter=lfs diff=lfs merge=lfs -text
40
- blobs/ab33d910f405204e5d388bc3521503584800461dc96808e287821dd451c1edac filter=lfs diff=lfs merge=lfs -text
41
- blobs/fc1cdddd6bfa91128d6e94ee73d0ce62bfcdb7af29e978ddcab30c66ae9ea7fa filter=lfs diff=lfs merge=lfs -text
42
- 10gb_file filter=lfs diff=lfs merge=lfs -text
43
- 3gb_file.dat filter=lfs diff=lfs merge=lfs -text
 
1
+ data/*.jsonl filter=lfs diff=lfs merge=lfs -text
2
+ adapter_model.safetensors filter=lfs diff=lfs merge=lfs -text
3
+ docs/images/4d-mask.png filter=lfs diff=lfs merge=lfs -text
4
+ docs/images/ray-cluster-dashboard.png filter=lfs diff=lfs merge=lfs -text
5
+ image/axolotl.png filter=lfs diff=lfs merge=lfs -text
6
+ image/sticker_fixed.png filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  tokenizer.json filter=lfs diff=lfs merge=lfs -text
8
+ tokenizer.model filter=lfs diff=lfs merge=lfs -text
9
+ training_args.bin filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
.github/CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Contributor Covenant Code of Conduct
2
+
3
+ ## Our Pledge
4
+
5
+ We as members, contributors, and leaders pledge to make participation in our
6
+ community a harassment-free experience for everyone, regardless of age, body
7
+ size, visible or invisible disability, ethnicity, sex characteristics, gender
8
+ identity and expression, level of experience, education, socio-economic status,
9
+ nationality, personal appearance, race, religion, or sexual identity
10
+ and orientation.
11
+
12
+ We pledge to act and interact in ways that contribute to an open, welcoming,
13
+ diverse, inclusive, and healthy community.
14
+
15
+ ## Our Standards
16
+
17
+ Examples of behavior that contributes to a positive environment for our
18
+ community include:
19
+
20
+ * Demonstrating empathy and kindness toward other people
21
+ * Being respectful of differing opinions, viewpoints, and experiences
22
+ * Giving and gracefully accepting constructive feedback
23
+ * Accepting responsibility and apologizing to those affected by our mistakes,
24
+ and learning from the experience
25
+ * Focusing on what is best not just for us as individuals, but for the
26
+ overall community
27
+
28
+ Examples of unacceptable behavior include:
29
+
30
+ * The use of sexualized language or imagery, and sexual attention or
31
+ advances of any kind
32
+ * Trolling, insulting or derogatory comments, and personal or political attacks
33
+ * Public or private harassment
34
+ * Publishing others' private information, such as a physical or email
35
+ address, without their explicit permission
36
+ * Other conduct which could reasonably be considered inappropriate in a
37
+ professional setting
38
+
39
+ ## Enforcement Responsibilities
40
+
41
+ Community leaders are responsible for clarifying and enforcing our standards of
42
+ acceptable behavior and will take appropriate and fair corrective action in
43
+ response to any behavior that they deem inappropriate, threatening, offensive,
44
+ or harmful.
45
+
46
+ Community leaders have the right and responsibility to remove, edit, or reject
47
+ comments, commits, code, wiki edits, issues, and other contributions that are
48
+ not aligned to this Code of Conduct, and will communicate reasons for moderation
49
+ decisions when appropriate.
50
+
51
+ ## Scope
52
+
53
+ This Code of Conduct applies within all community spaces, and also applies when
54
+ an individual is officially representing the community in public spaces.
55
+ Examples of representing our community include using an official e-mail address,
56
+ posting via an official social media account, or acting as an appointed
57
+ representative at an online or offline event.
58
+
59
+ ## Enforcement
60
+
61
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be
62
+ reported to the community leaders responsible for enforcement on Discord
63
+ at https://discord.gg/QYF8QrtEUm
64
+
65
+ All complaints will be reviewed and investigated promptly and fairly.
66
+
67
+ All community leaders are obligated to respect the privacy and security of the
68
+ reporter of any incident.
69
+
70
+ ## Enforcement Guidelines
71
+
72
+ Community leaders will follow these Community Impact Guidelines in determining
73
+ the consequences for any action they deem in violation of this Code of Conduct:
74
+
75
+ ### 1. Correction
76
+
77
+ **Community Impact**: Use of inappropriate language or other behavior deemed
78
+ unprofessional or unwelcome in the community.
79
+
80
+ **Consequence**: A private, written warning from community leaders, providing
81
+ clarity around the nature of the violation and an explanation of why the
82
+ behavior was inappropriate. A public apology may be requested.
83
+
84
+ ### 2. Warning
85
+
86
+ **Community Impact**: A violation through a single incident or series
87
+ of actions.
88
+
89
+ **Consequence**: A warning with consequences for continued behavior. No
90
+ interaction with the people involved, including unsolicited interaction with
91
+ those enforcing the Code of Conduct, for a specified period of time. This
92
+ includes avoiding interactions in community spaces as well as external channels
93
+ like social media. Violating these terms may lead to a temporary or
94
+ permanent ban.
95
+
96
+ ### 3. Temporary Ban
97
+
98
+ **Community Impact**: A serious violation of community standards, including
99
+ sustained inappropriate behavior.
100
+
101
+ **Consequence**: A temporary ban from any sort of interaction or public
102
+ communication with the community for a specified period of time. No public or
103
+ private interaction with the people involved, including unsolicited interaction
104
+ with those enforcing the Code of Conduct, is allowed during this period.
105
+ Violating these terms may lead to a permanent ban.
106
+
107
+ ### 4. Permanent Ban
108
+
109
+ **Community Impact**: Demonstrating a pattern of violation of community
110
+ standards, including sustained inappropriate behavior, harassment of an
111
+ individual, or aggression toward or disparagement of classes of individuals.
112
+
113
+ **Consequence**: A permanent ban from any sort of public interaction within
114
+ the community.
115
+
116
+ ## Attribution
117
+
118
+ This Code of Conduct is adapted from the [Contributor Covenant][homepage],
119
+ version 2.0, available at
120
+ https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
121
+
122
+ Community Impact Guidelines were inspired by [Mozilla's code of conduct
123
+ enforcement ladder](https://github.com/mozilla/diversity).
124
+
125
+ [homepage]: https://www.contributor-covenant.org
126
+
127
+ For answers to common questions about this code of conduct, see the FAQ at
128
+ https://www.contributor-covenant.org/faq. Translations are available at
129
+ https://www.contributor-covenant.org/translations.
.github/CONTRIBUTING.md ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Contributing to axolotl
2
+
3
+ First of all, thank you for your interest in contributing to axolotl! We appreciate the time and effort you're willing to invest in making our project better. This document provides guidelines and information to make the contribution process as smooth as possible.
4
+
5
+ ## Table of Contents
6
+
7
+ - [Code of Conduct](#code-of-conduct)
8
+ - [Getting Started](#getting-started)
9
+ - [How to Contribute](#how-to-contribute)
10
+ - [Reporting Bugs](#reporting-bugs)
11
+ - [Suggesting Enhancements](#suggesting-enhancements)
12
+ - [Submitting Pull Requests](#submitting-pull-requests)
13
+ - [Style Guidelines](#style-guidelines)
14
+ - [Code Style](#code-style)
15
+ - [Commit Messages](#commit-messages)
16
+ - [Additional Resources](#additional-resources)
17
+
18
+ ## Code of Conduct
19
+
20
+ All contributors are expected to adhere to our [Code of Conduct](CODE_OF_CONDUCT.md). Please read it before participating in the axolotl community.
21
+
22
+ ## Getting Started
23
+
24
+ Bugs? Please check for open issue else create a new [Issue](https://github.com/axolotl-ai-cloud/axolotl/issues/new).
25
+
26
+ PRs are **greatly welcome**!
27
+
28
+ 1. Fork the repository and clone it to your local machine.
29
+ 2. Set up the development environment by following the instructions in the [README.md](https://github.com/axolotl-ai-cloud/axolotl/tree/main/README.md) file.
30
+ 3. Explore the codebase, run tests, and verify that everything works as expected.
31
+
32
+ Please run below to setup env
33
+ ```bash
34
+ pip3 install -r requirements-dev.txt -r requirements-tests.txt
35
+ pre-commit install
36
+
37
+ # test
38
+ pytest tests/
39
+ ```
40
+
41
+ ## How to Contribute
42
+
43
+ ### Reporting Bugs
44
+
45
+ If you encounter a bug or issue while using axolotl, please open a new issue on the [GitHub Issues](https://github.com/axolotl-ai-cloud/axolotl/issues) page. Provide a clear and concise description of the problem, steps to reproduce it, and any relevant error messages or logs.
46
+
47
+ ### Suggesting Enhancements
48
+
49
+ We welcome ideas for improvements and new features. To suggest an enhancement, open a new issue on the [GitHub Issues](https://github.com/axolotl-ai-cloud/axolotl/issues) page. Describe the enhancement in detail, explain the use case, and outline the benefits it would bring to the project.
50
+
51
+ ### Submitting Pull Requests
52
+
53
+ 1. Create a new branch for your feature or bugfix. Use a descriptive name like `feature/your-feature-name` or `fix/your-bugfix-name`.
54
+ 2. Make your changes, following the [Style Guidelines](#style-guidelines) below.
55
+ 3. Test your changes and ensure that they don't introduce new issues or break existing functionality.
56
+ 4. Commit your changes, following the [commit message guidelines](#commit-messages).
57
+ 5. Push your branch to your fork on GitHub.
58
+ 6. Open a new pull request against the `main` branch of the axolotl repository. Include a clear and concise description of your changes, referencing any related issues.
59
+
60
+ ## Style Guidelines
61
+
62
+ ### Code Style
63
+
64
+ axolotl uses [{codestyle}]({URLofCodestyle}) as its code style guide. Please ensure that your code follows these guidelines.
65
+
66
+ ### Commit Messages
67
+
68
+ Write clear and concise commit messages that briefly describe the changes made in each commit. Use the imperative mood and start with a capitalized verb, e.g., "Add new feature" or "Fix bug in function".
69
+
70
+ ## Additional Resources
71
+
72
+ - [GitHub Help](https://help.github.com/)
73
+ - [GitHub Pull Request Documentation](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests)
74
+ - [{codestyle}]({URLofCodestyle})
75
+
76
+ Thank you once again for your interest in contributing to axolotl. We look forward to collaborating with you and creating an even better project together!
.github/FUNDING.yml ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # These are supported funding model platforms
2
+
3
+ github: [winglian, OpenAccess-AI-Collective] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
4
+ patreon: # Replace with a single Patreon username
5
+ open_collective: # Replace with a single Open Collective username
6
+ ko_fi: axolotl_ai # Replace with a single Ko-fi username
7
+ tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
8
+ community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
9
+ liberapay: # Replace with a single Liberapay username
10
+ issuehunt: # Replace with a single IssueHunt username
11
+ otechie: # Replace with a single Otechie username
12
+ lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
13
+ custom: ['https://quickchart.io/qr?text=bitcoin%3Abc1qxlgwlqwfea5s2cxm42xqsfmwjct0rj8w8ea5np&size=480&centerImageUrl=https%3A%2F%2Fupload.wikimedia.org%2Fwikipedia%2Fcommons%2Fthumb%2F4%2F46%2FBitcoin.svg%2F64px-Bitcoin.svg.png'] # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
.github/ISSUE_TEMPLATE/bug-report.yaml ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Bug Report
2
+ description: File a bug report
3
+ labels: ["bug", "needs triage"]
4
+ body:
5
+ - type: markdown
6
+ attributes:
7
+ value: |
8
+ ## Before you start
9
+ Please **make sure you are on the latest version.**
10
+ If you encountered the issue after you installed, updated, or reloaded, **please try restarting before reporting the bug**.
11
+
12
+ - type: checkboxes
13
+ id: no-duplicate-issues
14
+ attributes:
15
+ label: "Please check that this issue hasn't been reported before."
16
+ description: "The **Label filters** may help make your search more focussed."
17
+ options:
18
+ - label: "I searched previous [Bug Reports](https://github.com/axolotl-ai-cloud/axolotl/labels/bug) didn't find any similar reports."
19
+ required: true
20
+
21
+ - type: textarea
22
+ id: expected
23
+ attributes:
24
+ label: Expected Behavior
25
+ description: Tell us what **should** happen.
26
+ validations:
27
+ required: true
28
+
29
+ - type: textarea
30
+ id: what-happened
31
+ attributes:
32
+ label: Current behaviour
33
+ description: |
34
+ Tell us what happens instead of the expected behavior.
35
+ Provide stacktrace and/or screenshots.
36
+ validations:
37
+ required: true
38
+
39
+ - type: textarea
40
+ id: reproduce
41
+ attributes:
42
+ label: Steps to reproduce
43
+ description: |
44
+ Which exact steps can a developer take to reproduce the issue?
45
+ The more detail you provide, the easier it will be to narrow down and fix the bug.
46
+ Please paste in tasks and/or queries **as text, not screenshots**.
47
+ placeholder: |
48
+ Example of the level of detail needed to reproduce any bugs efficiently and reliably.
49
+ 1. Go to the '...' page.
50
+ 2. Click on the '...' button.
51
+ 3. Scroll down to '...'.
52
+ 4. Observe the error.
53
+ validations:
54
+ required: true
55
+
56
+ - type: textarea
57
+ id: config
58
+ attributes:
59
+ label: Config yaml
60
+ description: |
61
+ Please attach the config yaml!
62
+ render: yaml
63
+
64
+ - type: textarea
65
+ id: possible-solution
66
+ attributes:
67
+ label: Possible solution
68
+ description: |
69
+ Not obligatory, but please suggest a fix or reason for the bug, if you have an idea.
70
+
71
+
72
+ - type: checkboxes
73
+ id: operating-systems
74
+ attributes:
75
+ label: Which Operating Systems are you using?
76
+ description: You may select more than one.
77
+ options:
78
+ - label: Linux
79
+ - label: macOS
80
+ - label: Windows
81
+
82
+ - type: input
83
+ id: Python-version
84
+ attributes:
85
+ label: Python Version
86
+ description: Which {Programming} version are you using?
87
+ placeholder: 3.10 / please change accordingly
88
+ validations:
89
+ required: true
90
+
91
+ - type: input
92
+ id: axolotl-branch-commit
93
+ attributes:
94
+ label: axolotl branch-commit
95
+ description: On which branch/commit are you?
96
+ placeholder: main/4d6490b
97
+ validations:
98
+ required: true
99
+
100
+ - type: checkboxes
101
+ id: acknowledgements
102
+ attributes:
103
+ label: 'Acknowledgements'
104
+ description: 'Please confirm the following:'
105
+ options:
106
+ - label: 'My issue title is concise, descriptive, and in title casing.'
107
+ required: true
108
+ - label: 'I have searched the existing issues to make sure this bug has not been reported yet.'
109
+ required: true
110
+ - label: 'I am using the latest version of axolotl.'
111
+ required: true
112
+ - label: 'I have provided enough information for the maintainers to reproduce and diagnose the issue.'
113
+ required: true
.github/ISSUE_TEMPLATE/config.yml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ blank_issues_enabled: false
2
+ contact_links:
3
+ - name: Ask a question
4
+ url: https://github.com/axolotl-ai-cloud/axolotl/discussions/categories/q-a
5
+ about: Ask questions and discuss with other community members
6
+ - name: Discuss the Project in Discord
7
+ url: https://discord.gg/HhrNrHJPRb
.github/ISSUE_TEMPLATE/docs.yml ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Documentation Improvement / Clarity
2
+ description: Make a suggestion to improve the project documentation.
3
+ labels: ['needs triage', 'docs']
4
+ body:
5
+ - type: markdown
6
+ attributes:
7
+ value: '## :book: Documentation :book:'
8
+ - type: markdown
9
+ attributes:
10
+ value: |
11
+ * Ask questions in [Discord](https://discord.gg/HhrNrHJPRb).
12
+ * Before you file an issue read the [Contributing guide](./CONTRIBUTING.md).
13
+ * Check to make sure someone hasn't already opened a [similar issue](https://github.com/axolotl-ai-cloud/axolotl/issues).
14
+ - type: textarea
15
+ attributes:
16
+ label: What piece of documentation is affected?
17
+ description: Please link to the article you'd like to see updated.
18
+ validations:
19
+ required: true
20
+ - type: textarea
21
+ attributes:
22
+ label: What part(s) of the article would you like to see updated?
23
+ description: |
24
+ - Give as much detail as you can to help us understand the change you want to see.
25
+ - Why should the docs be changed? What use cases does it support?
26
+ - What is the expected outcome?
27
+ validations:
28
+ required: true
29
+ - type: textarea
30
+ attributes:
31
+ label: Additional Information
32
+ description: Add any other context or screenshots about the feature request here.
33
+ validations:
34
+ required: false
35
+ - type: checkboxes
36
+ id: acknowledgements
37
+ attributes:
38
+ label: 'Acknowledgements'
39
+ description: 'Please confirm the following:'
40
+ options:
41
+ - label: 'My issue title is concise, descriptive, and in title casing.'
42
+ required: true
43
+ - label: 'I have searched the existing issues to make sure this feature has not been requested yet.'
44
+ required: true
45
+ - label: 'I have provided enough information for the maintainers to understand and evaluate this request.'
46
+ required: true
.github/ISSUE_TEMPLATE/feature-request.yaml ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Feature Request / Enhancement
2
+ description: Suggest a new feature or feature enhancement for the project
3
+ labels: ["enhancement", "needs triage"]
4
+ body:
5
+ - type: checkboxes
6
+ id: no-duplicate-issues
7
+ attributes:
8
+ label: "⚠️ Please check that this feature request hasn't been suggested before."
9
+ description: "There are two locations for previous feature requests. Please search in both. Thank you. The **Label filters** may help make your search more focussed."
10
+ options:
11
+ - label: "I searched previous [Ideas in Discussions](https://github.com/axolotl-ai-cloud/axolotl/discussions/categories/ideas) didn't find any similar feature requests."
12
+ required: true
13
+ - label: "I searched previous [Issues](https://github.com/axolotl-ai-cloud/axolotl/labels/enhancement) didn't find any similar feature requests."
14
+ required: true
15
+
16
+ - type: textarea
17
+ id: feature-description
18
+ validations:
19
+ required: true
20
+ attributes:
21
+ label: "🔖 Feature description"
22
+ description: "A clear and concise description of what the feature request is."
23
+ placeholder: "You should add ..."
24
+
25
+ - type: textarea
26
+ id: solution
27
+ validations:
28
+ required: true
29
+ attributes:
30
+ label: "✔️ Solution"
31
+ description: "A clear and concise description of what you want to happen, and why."
32
+ placeholder: "In my use-case, ..."
33
+
34
+ - type: textarea
35
+ id: alternatives
36
+ validations:
37
+ required: false
38
+ attributes:
39
+ label: "❓ Alternatives"
40
+ description: "A clear and concise description of any alternative solutions or features you've considered."
41
+ placeholder: "I have considered ..."
42
+
43
+ - type: textarea
44
+ id: additional-context
45
+ validations:
46
+ required: false
47
+ attributes:
48
+ label: "📝 Additional Context"
49
+ description: "Add any other context or screenshots about the feature request here."
50
+ placeholder: "..."
51
+
52
+ - type: checkboxes
53
+ id: acknowledgements
54
+ attributes:
55
+ label: 'Acknowledgements'
56
+ description: 'Please confirm the following:'
57
+ options:
58
+ - label: 'My issue title is concise, descriptive, and in title casing.'
59
+ required: true
60
+ - label: 'I have searched the existing issues to make sure this feature has not been requested yet.'
61
+ required: true
62
+ - label: 'I have provided enough information for the maintainers to understand and evaluate this request.'
63
+ required: true
.github/PULL_REQUEST_TEMPLATE.md ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!--- Provide a general summary of your changes in the Title above -->
2
+
3
+ # Description
4
+
5
+ <!--- Describe your changes in detail -->
6
+
7
+ ## Motivation and Context
8
+
9
+ <!--- Why is this change required? What problem does it solve? -->
10
+ <!--- If it fixes an open issue, please link to the issue here. -->
11
+
12
+ ## How has this been tested?
13
+
14
+ <!--- Please describe in detail how you tested your changes. -->
15
+ <!--- Include details of your testing environment, tests ran to see how -->
16
+ <!--- your change affects other areas of the code, etc. -->
17
+
18
+ ## Screenshots (if appropriate)
19
+
20
+ ## Types of changes
21
+
22
+ <!--- What types of changes does your code introduce? Put an `x` in all the boxes that apply: -->
23
+
24
+ ## Social Handles (Optional)
25
+
26
+ <!-- Thanks for submitting a bugfix or enhancement. -->
27
+ <!-- We'd love to show our thanks to you on Twitter & Discord if you provide your handle -->
.github/SECURITY.md ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # Security Policy
2
+
3
+ ## Supported Versions
4
+
5
+ Due to the nature of the fast development that is happening in this project, only the latest released version can be supported.
6
+
7
+ ## Reporting a Vulnerability
8
+
9
+ If you find a vulnerability, please contact us on [Discord](https://discord.gg/xcu3ECkH9a) rather than creating a GitHub issue to allow us some time to fix it before it is a known vulnerability to others.
.github/SUPPORT.md ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # Support
2
+
3
+ If you need help with this project or have questions, please:
4
+
5
+ 1. Check the documentation.
6
+ 2. Search the existing issues and pull requests.
7
+ 3. Create a new issue if your question is not answered or your problem is not solved.
8
+ 4. Have a look in the [Discord server](https://discord.gg/HhrNrHJPRb)
9
+
10
+ Please note that this project is maintained by volunteers who have limited availability. We'll do our best to address your questions and concerns in a timely manner.
.github/release-drafter.yml ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name-template: 'v$RESOLVED_VERSION'
2
+ tag-template: 'v$RESOLVED_VERSION'
3
+ categories:
4
+ - title: '🚀 Features'
5
+ labels:
6
+ - 'feature'
7
+ - 'enhancement'
8
+ - title: '🐛 Bug Fixes'
9
+ labels:
10
+ - 'fix'
11
+ - 'bugfix'
12
+ - 'bug'
13
+ - title: '🧰 Maintenance'
14
+ label: 'chore'
15
+ change-template: '- $TITLE @$AUTHOR (#$NUMBER)'
16
+ change-title-escapes: '\<*_&' # You can add # and @ to disable mentions, and add ` to disable code blocks.
17
+ version-resolver:
18
+ major:
19
+ labels:
20
+ - 'major'
21
+ minor:
22
+ labels:
23
+ - 'minor'
24
+ patch:
25
+ labels:
26
+ - 'patch'
27
+ default: patch
28
+ template: |
29
+ ## What’s Changed
30
+
31
+ $CHANGES
.github/workflows/base.yml ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: ci-cd-base
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - "main"
7
+ paths:
8
+ - 'Dockerfile-base'
9
+ - '.github/workflows/base.yml'
10
+ pull_request:
11
+ paths:
12
+ - 'Dockerfile-base'
13
+ - '.github/workflows/base.yml'
14
+ workflow_dispatch:
15
+
16
+ jobs:
17
+ build-base:
18
+ if: github.repository_owner == 'axolotl-ai-cloud'
19
+ # this job needs to be run on self-hosted GPU runners...
20
+ runs-on: axolotl-gpu-runner
21
+ strategy:
22
+ fail-fast: false
23
+ matrix:
24
+ include:
25
+ - cuda: "124"
26
+ cuda_version: 12.4.1
27
+ cudnn_version: ""
28
+ python_version: "3.11"
29
+ pytorch: 2.4.1
30
+ torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
31
+ - cuda: "124"
32
+ cuda_version: 12.4.1
33
+ cudnn_version: ""
34
+ python_version: "3.11"
35
+ pytorch: 2.5.1
36
+ torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
37
+ - cuda: "124"
38
+ cuda_version: 12.4.1
39
+ cudnn_version: ""
40
+ python_version: "3.11"
41
+ pytorch: 2.6.0
42
+ torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
43
+ - cuda: "128"
44
+ cuda_version: 12.8.1
45
+ cudnn_version: ""
46
+ python_version: "3.11"
47
+ pytorch: nightly
48
+ torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
49
+ steps:
50
+ - name: Checkout
51
+ uses: actions/checkout@v4
52
+ - name: Docker metadata
53
+ id: metadata
54
+ uses: docker/metadata-action@v5
55
+ with:
56
+ images: |
57
+ winglian/axolotl-base
58
+ axolotlai/axolotl-base
59
+ - name: Login to Docker Hub
60
+ uses: docker/login-action@v2
61
+ with:
62
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
63
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
64
+ - name: Set up Docker Buildx
65
+ uses: docker/setup-buildx-action@v3
66
+ - name: Build
67
+ uses: docker/build-push-action@v4
68
+ with:
69
+ context: .
70
+ file: ${{ matrix.pytorch == 'nightly' && './docker/Dockerfile-base-nightly' || './docker/Dockerfile-base' }}
71
+ push: ${{ github.event_name != 'pull_request' }}
72
+ tags: ${{ steps.metadata.outputs.tags }}-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
73
+ labels: ${{ steps.metadata.outputs.labels }}
74
+ build-args: |
75
+ CUDA_VERSION=${{ matrix.cuda_version }}
76
+ CUDNN_VERSION=${{ matrix.cudnn_version }}
77
+ CUDA=${{ matrix.cuda }}
78
+ PYTHON_VERSION=${{ matrix.python_version }}
79
+ PYTORCH_VERSION=${{ matrix.pytorch }}
80
+ TORCH_CUDA_ARCH_LIST=${{ matrix.torch_cuda_arch_list }}
.github/workflows/docs.yml ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Publish Docs
2
+ on:
3
+ push:
4
+ branches:
5
+ - main
6
+
7
+ permissions:
8
+ contents: write
9
+ pages: write
10
+
11
+ jobs:
12
+ build-deploy:
13
+ runs-on: ubuntu-latest
14
+ steps:
15
+ - name: Check out repository
16
+ uses: actions/checkout@v4
17
+ - name: Set up Quarto
18
+ uses: quarto-dev/quarto-actions/setup@v2
19
+ - name: Setup Python
20
+ uses: actions/setup-python@v5
21
+ with:
22
+ python-version: '3.11'
23
+ - name: Install dependencies
24
+ run: |
25
+ python3 -m pip install jupyter quartodoc
26
+ python3 -m pip install -e . --no-deps
27
+ - name: Build autodoc
28
+ run: quartodoc build
29
+ - name: Publish to GitHub Pages (and render)
30
+ uses: quarto-dev/quarto-actions/publish@v2
31
+ with:
32
+ target: gh-pages
33
+ env:
34
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/lint.yml ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: lint
2
+ on:
3
+ # check on PRs, and manual triggers
4
+ merge_group:
5
+ pull_request:
6
+ paths:
7
+ - '**.py'
8
+ - 'requirements.txt'
9
+ - '.github/workflows/*.yml'
10
+ - "*.[q]md"
11
+ - "examples/**/*.y[a]?ml"
12
+ workflow_dispatch:
13
+
14
+ jobs:
15
+ pre-commit:
16
+ name: pre-commit
17
+ runs-on: ubuntu-latest
18
+ steps:
19
+ - uses: actions/checkout@v4
20
+ - uses: actions/setup-python@v5
21
+ with:
22
+ python-version: "3.11"
23
+ cache: 'pip' # caching pip dependencies
24
+ - uses: pre-commit/action@v3.0.1
.github/workflows/main.yml ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: ci-cd
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - "main"
7
+ tags:
8
+ - "v*"
9
+ workflow_dispatch:
10
+
11
+ jobs:
12
+ build-axolotl:
13
+ if: ${{ ! contains(github.event.commits[0].message, '[skip docker]') && github.repository_owner == 'axolotl-ai-cloud' }}
14
+ strategy:
15
+ fail-fast: false
16
+ matrix:
17
+ include:
18
+ - cuda: 124
19
+ cuda_version: 12.4.1
20
+ python_version: "3.11"
21
+ pytorch: 2.4.1
22
+ axolotl_extras:
23
+ - cuda: 124
24
+ cuda_version: 12.4.1
25
+ python_version: "3.11"
26
+ pytorch: 2.5.1
27
+ axolotl_extras: vllm
28
+ is_latest: true
29
+ - cuda: 124
30
+ cuda_version: 12.4.1
31
+ python_version: "3.11"
32
+ pytorch: 2.6.0
33
+ axolotl_extras:
34
+ runs-on: axolotl-gpu-runner
35
+ steps:
36
+ - name: Checkout
37
+ uses: actions/checkout@v4
38
+ - name: Docker metadata
39
+ id: metadata
40
+ uses: docker/metadata-action@v5
41
+ with:
42
+ images: |
43
+ winglian/axolotl
44
+ axolotlai/axolotl
45
+ tags: |
46
+ type=ref,event=branch
47
+ type=pep440,pattern={{version}}
48
+ - name: Set up Docker Buildx
49
+ uses: docker/setup-buildx-action@v3
50
+ - name: Login to Docker Hub
51
+ uses: docker/login-action@v3
52
+ with:
53
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
54
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
55
+ # guidance for testing before pushing: https://docs.docker.com/build/ci/github-actions/test-before-push/
56
+ - name: Build and export to Docker
57
+ uses: docker/build-push-action@v5
58
+ with:
59
+ context: .
60
+ build-args: |
61
+ BASE_TAG=${{ github.ref_type == 'tag' && 'main' || github.ref_name }}-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}
62
+ CUDA=${{ matrix.cuda }}
63
+ PYTORCH_VERSION=${{ matrix.pytorch }}
64
+ AXOLOTL_ARGS=${{ matrix.axolotl_args }}
65
+ file: ./docker/Dockerfile
66
+ push: ${{ github.event_name != 'pull_request' }}
67
+ tags: |
68
+ ${{ steps.metadata.outputs.tags }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
69
+ ${{ steps.metadata.outputs.tags }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}
70
+ ${{ (matrix.is_latest) && format('{0}-latest', steps.metadata.outputs.tags) || '' }}
71
+ labels: ${{ steps.metadata.outputs.labels }}
72
+
73
+ build-axolotl-cloud:
74
+ needs: build-axolotl
75
+ if: ${{ ! contains(github.event.commits[0].message, '[skip docker]') && github.repository_owner == 'axolotl-ai-cloud' }}
76
+ # this job needs to be run on self-hosted GPU runners...
77
+ strategy:
78
+ matrix:
79
+ include:
80
+ - cuda: 124
81
+ cuda_version: 12.4.1
82
+ python_version: "3.11"
83
+ pytorch: 2.4.1
84
+ axolotl_extras:
85
+ - cuda: 124
86
+ cuda_version: 12.4.1
87
+ python_version: "3.11"
88
+ pytorch: 2.5.1
89
+ axolotl_extras:
90
+ is_latest: true
91
+ - cuda: 124
92
+ cuda_version: 12.4.1
93
+ python_version: "3.11"
94
+ pytorch: 2.6.0
95
+ axolotl_extras:
96
+ runs-on: axolotl-gpu-runner
97
+ steps:
98
+ - name: Checkout
99
+ uses: actions/checkout@v4
100
+ - name: Docker metadata
101
+ id: metadata
102
+ uses: docker/metadata-action@v5
103
+ with:
104
+ images: |
105
+ winglian/axolotl-cloud
106
+ axolotlai/axolotl-cloud
107
+ tags: |
108
+ type=ref,event=branch
109
+ type=pep440,pattern={{version}}
110
+ - name: Login to Docker Hub
111
+ uses: docker/login-action@v3
112
+ with:
113
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
114
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
115
+ - name: Set up Docker Buildx
116
+ uses: docker/setup-buildx-action@v3
117
+ - name: Build
118
+ uses: docker/build-push-action@v5
119
+ with:
120
+ context: .
121
+ build-args: |
122
+ BASE_TAG=${{ github.ref_type == 'tag' && 'main' || github.ref_name }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
123
+ CUDA=${{ matrix.cuda }}
124
+ file: ./docker/Dockerfile-cloud
125
+ push: ${{ github.event_name != 'pull_request' }}
126
+ tags: |
127
+ ${{ steps.metadata.outputs.tags }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
128
+ ${{ (matrix.is_latest) && format('{0}-latest', steps.metadata.outputs.tags) || '' }}
129
+ labels: ${{ steps.metadata.outputs.labels }}
130
+
131
+ build-axolotl-cloud-no-tmux:
132
+ needs: build-axolotl
133
+ if: ${{ ! contains(github.event.commits[0].message, '[skip docker]') && github.repository_owner == 'axolotl-ai-cloud' }}
134
+ # this job needs to be run on self-hosted GPU runners...
135
+ strategy:
136
+ matrix:
137
+ include:
138
+ - cuda: 124
139
+ cuda_version: 12.4.1
140
+ python_version: "3.11"
141
+ pytorch: 2.4.1
142
+ axolotl_extras:
143
+ runs-on: axolotl-gpu-runner
144
+ steps:
145
+ - name: Checkout
146
+ uses: actions/checkout@v4
147
+ - name: Docker metadata
148
+ id: metadata
149
+ uses: docker/metadata-action@v5
150
+ with:
151
+ images: |
152
+ winglian/axolotl-cloud-term
153
+ axolotlai/axolotl-cloud-term
154
+ tags: |
155
+ type=ref,event=branch
156
+ type=pep440,pattern={{version}}
157
+ - name: Login to Docker Hub
158
+ uses: docker/login-action@v3
159
+ with:
160
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
161
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
162
+ - name: Set up Docker Buildx
163
+ uses: docker/setup-buildx-action@v3
164
+ - name: Build
165
+ uses: docker/build-push-action@v5
166
+ with:
167
+ context: .
168
+ build-args: |
169
+ BASE_TAG=${{ github.ref_type == 'tag' && 'main' || github.ref_name }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
170
+ CUDA=${{ matrix.cuda }}
171
+ file: ./docker/Dockerfile-cloud-no-tmux
172
+ push: ${{ github.event_name != 'pull_request' }}
173
+ tags: |
174
+ ${{ steps.metadata.outputs.tags }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
175
+ ${{ (matrix.is_latest) && format('{0}-latest', steps.metadata.outputs.tags) || '' }}
176
+ labels: ${{ steps.metadata.outputs.labels }}
.github/workflows/multi-gpu-e2e.yml ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: docker-multigpu-tests-biweekly
2
+
3
+ on:
4
+ pull_request:
5
+ paths:
6
+ - 'tests/e2e/multigpu/*.py'
7
+ - 'requirements.txt'
8
+ - 'setup.py'
9
+ - 'pyproject.toml'
10
+ - '.github/workflows/multi-gpu-e2e.yml'
11
+ workflow_dispatch:
12
+ schedule:
13
+ - cron: '0 0 * * 1,4' # Runs at 00:00 UTC every monday & thursday
14
+
15
+ # Cancel jobs on the same ref if a new one is triggered
16
+ concurrency:
17
+ group: ${{ github.workflow }}-${{ github.ref }}
18
+ cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
19
+
20
+ jobs:
21
+ test-axolotl-multigpu:
22
+ if: ${{ ! contains(github.event.commits[0].message, '[skip e2e]') && github.repository_owner == 'axolotl-ai-cloud' }}
23
+ strategy:
24
+ fail-fast: false
25
+ matrix:
26
+ include:
27
+ - cuda: 124
28
+ cuda_version: 12.4.1
29
+ python_version: "3.11"
30
+ pytorch: 2.4.1
31
+ axolotl_extras: # no vllm support for 2.4.1
32
+ num_gpus: 2
33
+ nightly_build: "true"
34
+ - cuda: 124
35
+ cuda_version: 12.4.1
36
+ python_version: "3.11"
37
+ pytorch: 2.5.1
38
+ axolotl_extras: vllm
39
+ num_gpus: 2
40
+ nightly_build: "true"
41
+ - cuda: 124
42
+ cuda_version: 12.4.1
43
+ python_version: "3.11"
44
+ pytorch: 2.6.0
45
+ # awaiting vllm#12721
46
+ axolotl_extras:
47
+ num_gpus: 2
48
+ nightly_build: "true"
49
+ runs-on: [self-hosted, modal]
50
+ timeout-minutes: 120
51
+ steps:
52
+ - name: Checkout
53
+ uses: actions/checkout@v4
54
+ - name: Install Python
55
+ uses: actions/setup-python@v5
56
+ with:
57
+ python-version: "3.11"
58
+ - name: Install Modal
59
+ run: |
60
+ python -m pip install --upgrade pip
61
+ pip install modal==0.71.8 jinja2
62
+ - name: Update env vars
63
+ run: |
64
+ echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
65
+ echo "PYTORCH_VERSION=${{ matrix.pytorch}}" >> $GITHUB_ENV
66
+ echo "AXOLOTL_ARGS=${{ matrix.axolotl_args}}" >> $GITHUB_ENV
67
+ echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
68
+ echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
69
+ echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
70
+ echo "NIGHTLY_BUILD=${{ matrix.nightly_build }}" >> $GITHUB_ENV
71
+ - name: Run tests job on Modal
72
+ run: |
73
+ modal run cicd.multigpu
.github/workflows/nightlies.yml ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: docker-nightlies
2
+
3
+ on:
4
+ workflow_dispatch:
5
+ schedule:
6
+ - cron: '0 0 * * *' # Runs at 00:00 UTC every day
7
+
8
+ jobs:
9
+ build-axolotl:
10
+ if: ${{ ! contains(github.event.commits[0].message, '[skip docker]') && github.repository_owner == 'axolotl-ai-cloud' }}
11
+ strategy:
12
+ fail-fast: false
13
+ matrix:
14
+ include:
15
+ - cuda: 124
16
+ cuda_version: 12.4.1
17
+ python_version: "3.11"
18
+ pytorch: 2.4.1
19
+ axolotl_extras:
20
+ - cuda: 124
21
+ cuda_version: 12.4.1
22
+ python_version: "3.11"
23
+ pytorch: 2.5.1
24
+ axolotl_extras:
25
+ - cuda: 124
26
+ cuda_version: 12.4.1
27
+ python_version: "3.11"
28
+ pytorch: 2.6.0
29
+ axolotl_extras:
30
+ runs-on: axolotl-gpu-runner
31
+ steps:
32
+ - name: Checkout
33
+ uses: actions/checkout@v4
34
+ - name: Docker metadata
35
+ id: metadata
36
+ uses: docker/metadata-action@v5
37
+ with:
38
+ images: |
39
+ winglian/axolotl
40
+ axolotlai/axolotl
41
+ tags: |
42
+ type=raw,value={{ branch }}-{{ date 'YYYYMMDD' }}
43
+ - name: Set up Docker Buildx
44
+ uses: docker/setup-buildx-action@v3
45
+ - name: Login to Docker Hub
46
+ uses: docker/login-action@v3
47
+ with:
48
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
49
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
50
+ # guidance for testing before pushing: https://docs.docker.com/build/ci/github-actions/test-before-push/
51
+ - name: Build and export to Docker
52
+ uses: docker/build-push-action@v5
53
+ with:
54
+ context: .
55
+ build-args: |
56
+ BASE_TAG=${{ github.ref_name }}-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}
57
+ CUDA=${{ matrix.cuda }}
58
+ PYTORCH_VERSION=${{ matrix.pytorch }}
59
+ AXOLOTL_ARGS=${{ matrix.axolotl_args }}
60
+ file: ./docker/Dockerfile
61
+ push: ${{ github.event_name != 'pull_request' }}
62
+ tags: |
63
+ ${{ steps.metadata.outputs.tags }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
64
+ labels: ${{ steps.metadata.outputs.labels }}
65
+
66
+ build-axolotl-cloud:
67
+ needs: build-axolotl
68
+ if: ${{ ! contains(github.event.commits[0].message, '[skip docker]') && github.repository_owner == 'axolotl-ai-cloud' }}
69
+ # this job needs to be run on self-hosted GPU runners...
70
+ strategy:
71
+ matrix:
72
+ include:
73
+ - cuda: 124
74
+ cuda_version: 12.4.1
75
+ python_version: "3.11"
76
+ pytorch: 2.4.1
77
+ axolotl_extras:
78
+ - cuda: 124
79
+ cuda_version: 12.4.1
80
+ python_version: "3.11"
81
+ pytorch: 2.5.1
82
+ axolotl_extras:
83
+ - cuda: 124
84
+ cuda_version: 12.4.1
85
+ python_version: "3.11"
86
+ pytorch: 2.6.0
87
+ axolotl_extras:
88
+ runs-on: axolotl-gpu-runner
89
+ steps:
90
+ - name: Checkout
91
+ uses: actions/checkout@v4
92
+ - name: Docker metadata
93
+ id: metadata
94
+ uses: docker/metadata-action@v5
95
+ with:
96
+ images: |
97
+ winglian/axolotl-cloud
98
+ axolotlai/axolotl-cloud
99
+ tags: |
100
+ type=raw,value={{ branch }}-{{ date 'YYYYMMDD' }}
101
+ - name: Login to Docker Hub
102
+ uses: docker/login-action@v3
103
+ with:
104
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
105
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
106
+ - name: Set up Docker Buildx
107
+ uses: docker/setup-buildx-action@v3
108
+ - name: Build
109
+ uses: docker/build-push-action@v5
110
+ with:
111
+ context: .
112
+ build-args: |
113
+ BASE_TAG=${{ github.ref_name }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
114
+ CUDA=${{ matrix.cuda }}
115
+ file: ./docker/Dockerfile-cloud
116
+ push: ${{ github.event_name != 'pull_request' }}
117
+ tags: |
118
+ ${{ steps.metadata.outputs.tags }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
119
+ labels: ${{ steps.metadata.outputs.labels }}
.github/workflows/precommit-autoupdate.yml ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Pre-commit auto-update
2
+
3
+ on:
4
+ schedule:
5
+ - cron: '0 0 * * 0' # Run weekly
6
+ workflow_dispatch: # Manual kickoff
7
+
8
+ jobs:
9
+ auto-update:
10
+ runs-on: ubuntu-latest
11
+ permissions:
12
+ contents: write
13
+ pull-requests: write
14
+ steps:
15
+ - uses: actions/checkout@v4
16
+
17
+ - uses: actions/setup-python@v5
18
+ with:
19
+ python-version: '3.11'
20
+
21
+ - name: Update pre-commit hooks
22
+ id: update
23
+ run: |
24
+ pip install pre-commit
25
+ pre-commit autoupdate
26
+ if [[ -n $(git status --porcelain) ]]; then
27
+ echo "changes=true" >> $GITHUB_OUTPUT
28
+ git diff .pre-commit-config.yaml > pre-commit-update.diff
29
+ fi
30
+
31
+ - name: Create Pull Request
32
+ if: steps.update.outputs.changes == 'true'
33
+ uses: peter-evans/create-pull-request@v6
34
+ with:
35
+ token: ${{ secrets.GITHUB_TOKEN }}
36
+ branch: update/pre-commit-hooks
37
+ delete-branch: true
38
+ title: "chore: update pre-commit hooks"
39
+ commit-message: "chore: update pre-commit hooks"
40
+ body: |
41
+ Automated PR to update pre-commit hooks to their latest versions.
42
+
43
+ <details>
44
+ <summary>Changes:</summary>
45
+
46
+ ```diff
47
+ ${{ steps.update.outputs.diff }}
48
+ ```
49
+ </details>
.github/workflows/pypi.yml ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: publish pypi
2
+
3
+ on:
4
+ push:
5
+ tags:
6
+ - 'v*'
7
+ workflow_dispatch:
8
+
9
+ jobs:
10
+ setup_release:
11
+ name: Create Release
12
+ runs-on: ubuntu-latest
13
+ permissions:
14
+ contents: write
15
+ steps:
16
+ - name: Checkout code
17
+ uses: actions/checkout@v4
18
+
19
+ - name: Create release
20
+ env:
21
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
22
+ run: gh release create "$GITHUB_REF_NAME" --generate-notes
23
+ pypi-publish:
24
+ name: Upload release to PyPI
25
+ runs-on: ubuntu-latest
26
+ needs: [setup_release]
27
+ environment:
28
+ name: pypi
29
+ url: https://pypi.org/p/axolotl
30
+ permissions:
31
+ id-token: write # IMPORTANT: this permission is mandatory for trusted publishing
32
+ steps:
33
+ - name: Check out repository code
34
+ uses: actions/checkout@v4
35
+
36
+ - name: Setup Python
37
+ uses: actions/setup-python@v5
38
+ with:
39
+ python-version: "3.11"
40
+
41
+ - name: Install dependencies
42
+ run: |
43
+ pip3 install wheel packaging==23.2
44
+ pip3 install --no-build-isolation -e .
45
+ pip3 install -r requirements-dev.txt -r requirements-tests.txt
46
+
47
+ - name: Extract tag name
48
+ id: tag
49
+ run: echo ::set-output name=TAG_NAME::$(echo $GITHUB_REF | cut -d / -f 3)
50
+
51
+ - name: Update version in setup.py
52
+ run: |
53
+ sed -i -E 's/version="([0-9.]+)",/version="${{ steps.tag.outputs.TAG_NAME }}",/g' setup.py
54
+
55
+ - name: Build a source dist
56
+ run: |
57
+ python setup.py sdist
58
+
59
+ - name: Publish package distributions to PyPI
60
+ uses: pypa/gh-action-pypi-publish@release/v1
.github/workflows/tests-nightly.yml ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Tests Nightly against upstream main
2
+ on:
3
+ workflow_dispatch:
4
+ schedule:
5
+ - cron: '0 0 * * *' # Runs at 00:00 UTC every day
6
+
7
+ jobs:
8
+ pre-commit:
9
+ name: pre-commit
10
+ runs-on: ubuntu-latest
11
+ steps:
12
+ - uses: actions/checkout@v4
13
+ - uses: actions/setup-python@v5
14
+ with:
15
+ python-version: "3.11"
16
+ cache: 'pip' # caching pip dependencies
17
+ - uses: pre-commit/action@v3.0.1
18
+ env:
19
+ SKIP: no-commit-to-branch
20
+
21
+ pytest:
22
+ name: PyTest
23
+ runs-on: ubuntu-latest
24
+ strategy:
25
+ fail-fast: false
26
+ max-parallel: 2
27
+ matrix:
28
+ python_version: ["3.11"]
29
+ pytorch_version: ["2.4.1", "2.5.1", "2.6.0"]
30
+ timeout-minutes: 20
31
+
32
+ steps:
33
+ - name: Check out repository code
34
+ uses: actions/checkout@v4
35
+
36
+ - name: Setup Python
37
+ uses: actions/setup-python@v5
38
+ with:
39
+ python-version: ${{ matrix.python_version }}
40
+ cache: 'pip' # caching pip dependencies
41
+
42
+ - name: upgrade pip
43
+ run: |
44
+ pip3 install --upgrade pip
45
+ pip3 install --upgrade packaging==23.2 setuptools==75.8.0 wheel
46
+
47
+ - name: Install PyTorch
48
+ run: |
49
+ pip3 install torch==${{ matrix.pytorch_version }} --index-url https://download.pytorch.org/whl/cpu
50
+
51
+ - name: Update requirements.txt
52
+ run: |
53
+ sed -i 's#^transformers.*#transformers @ git+https://github.com/huggingface/transformers.git@main#' requirements.txt
54
+ sed -i 's#^peft.*#peft @ git+https://github.com/huggingface/peft.git@main#' requirements.txt
55
+ sed -i 's#^accelerate.*#accelerate @ git+https://github.com/huggingface/accelerate.git@main#' requirements.txt
56
+ sed -i 's#^trl.*#trl @ git+https://github.com/huggingface/trl.git@main#' requirements.txt
57
+ sed -i 's#^datasets.*#datasets @ git+https://github.com/huggingface/datasets.git@main#' requirements.txt
58
+
59
+ - name: Install dependencies
60
+ run: |
61
+ pip3 install --upgrade pip
62
+ pip3 install --upgrade packaging==23.2
63
+ pip3 install --no-build-isolation -U -e .
64
+ python scripts/unsloth_install.py | sh
65
+ python scripts/cutcrossentropy_install.py | sh
66
+ pip3 install -r requirements-dev.txt -r requirements-tests.txt
67
+
68
+ - name: Make sure PyTorch version wasn't clobbered
69
+ run: |
70
+ python -c "import torch; assert '${{ matrix.pytorch_version }}' in torch.__version__"
71
+
72
+ - name: Ensure axolotl CLI was installed
73
+ run: |
74
+ axolotl --help
75
+
76
+ - name: Run tests
77
+ run: |
78
+ pytest -n8 --dist loadfile --ignore=tests/e2e/ --ignore=tests/patched/ tests/
79
+ pytest tests/patched/
80
+
81
+ - name: cleanup pip cache
82
+ run: |
83
+ find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
84
+
85
+ docker-e2e-tests:
86
+ if: github.repository_owner == 'axolotl-ai-cloud'
87
+ # this job needs to be run on self-hosted GPU runners...
88
+ runs-on: [self-hosted, modal]
89
+ timeout-minutes: 60
90
+ needs: [pre-commit, pytest]
91
+
92
+ strategy:
93
+ fail-fast: false
94
+ matrix:
95
+ include:
96
+ - cuda: 124
97
+ cuda_version: 12.4.1
98
+ python_version: "3.11"
99
+ pytorch: 2.4.1
100
+ num_gpus: 1
101
+ axolotl_extras:
102
+ nightly_build: "true"
103
+ - cuda: 124
104
+ cuda_version: 12.4.1
105
+ python_version: "3.11"
106
+ pytorch: 2.5.1
107
+ num_gpus: 1
108
+ axolotl_extras:
109
+ nightly_build: "true"
110
+ - cuda: 124
111
+ cuda_version: 12.4.1
112
+ python_version: "3.11"
113
+ pytorch: 2.6.0
114
+ num_gpus: 1
115
+ axolotl_extras:
116
+ nightly_build: "true"
117
+ steps:
118
+ - name: Checkout
119
+ uses: actions/checkout@v4
120
+ - name: Install Python
121
+ uses: actions/setup-python@v5
122
+ with:
123
+ python-version: "3.11"
124
+ - name: Install Modal
125
+ run: |
126
+ python -m pip install --upgrade pip
127
+ pip install modal==0.71.8 jinja2
128
+ - name: Update env vars
129
+ run: |
130
+ echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
131
+ echo "PYTORCH_VERSION=${{ matrix.pytorch}}" >> $GITHUB_ENV
132
+ echo "AXOLOTL_ARGS=${{ matrix.axolotl_args}}" >> $GITHUB_ENV
133
+ echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
134
+ echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
135
+ echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
136
+ echo "NIGHTLY_BUILD=${{ matrix.nightly_build }}" >> $GITHUB_ENV
137
+ - name: Run tests job on Modal
138
+ run: |
139
+ modal run cicd.tests
.github/workflows/tests.yml ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Tests
2
+ on:
3
+ # check on push/merge to main, PRs, and manual triggers
4
+ merge_group:
5
+ push:
6
+ branches:
7
+ - "main"
8
+ paths:
9
+ - '**.py'
10
+ - 'requirements.txt'
11
+ - '.github/workflows/*.yml'
12
+ - 'requirements-tests.txt'
13
+ - 'cicd/cicd.sh'
14
+ - 'cicd/Dockerfile.jinja'
15
+ pull_request:
16
+ paths:
17
+ - '**.py'
18
+ - 'requirements.txt'
19
+ - '.github/workflows/*.yml'
20
+ - 'requirements-tests.txt'
21
+ - 'cicd/cicd.sh'
22
+ - 'cicd/Dockerfile.jinja'
23
+ workflow_dispatch:
24
+
25
+ # Cancel jobs on the same ref if a new one is triggered
26
+ concurrency:
27
+ group: ${{ github.workflow }}-${{ github.ref }}
28
+ cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
29
+
30
+ jobs:
31
+ pre-commit:
32
+ name: pre-commit
33
+ runs-on: ubuntu-latest
34
+ steps:
35
+ - uses: actions/checkout@v4
36
+ - uses: actions/setup-python@v5
37
+ with:
38
+ python-version: "3.11"
39
+ cache: 'pip' # caching pip dependencies
40
+ - uses: pre-commit/action@v3.0.1
41
+ env:
42
+ SKIP: no-commit-to-branch
43
+
44
+ pytest:
45
+ name: PyTest
46
+ runs-on: ubuntu-latest
47
+ strategy:
48
+ fail-fast: false
49
+ max-parallel: 2
50
+ matrix:
51
+ python_version: ["3.11"]
52
+ pytorch_version: ["2.4.1", "2.5.1", "2.6.0"]
53
+ timeout-minutes: 20
54
+
55
+ steps:
56
+ - name: Check out repository code
57
+ uses: actions/checkout@v4
58
+
59
+ - name: Restore HF cache
60
+ id: hf-cache-restore
61
+ uses: actions/cache/restore@v4
62
+ with:
63
+ path: |
64
+ /home/runner/.cache/huggingface/hub/datasets--*
65
+ /home/runner/.cache/huggingface/hub/models--*
66
+ key: ${{ runner.os }}-hf-hub-cache-${{ hashFiles('**/conftest.py') }}
67
+
68
+ - name: Setup Python
69
+ uses: actions/setup-python@v5
70
+ with:
71
+ python-version: ${{ matrix.python_version }}
72
+ cache: 'pip' # caching pip dependencies
73
+
74
+ - name: upgrade pip
75
+ run: |
76
+ pip3 install --upgrade pip
77
+ pip3 install --upgrade packaging==23.2 setuptools==75.8.0 wheel
78
+
79
+ - name: Install PyTorch
80
+ run: |
81
+ pip3 install torch==${{ matrix.pytorch_version }}
82
+
83
+ - name: Install dependencies
84
+ run: |
85
+ pip3 show torch
86
+ pip3 install --no-build-isolation -U -e .
87
+ python scripts/unsloth_install.py | sh
88
+ python scripts/cutcrossentropy_install.py | sh
89
+ pip3 install -r requirements-dev.txt -r requirements-tests.txt
90
+
91
+ - name: Make sure PyTorch version wasn't clobbered
92
+ run: |
93
+ python -c "import torch; assert '${{ matrix.pytorch_version }}' in torch.__version__"
94
+
95
+ - name: Ensure axolotl CLI was installed
96
+ run: |
97
+ axolotl --help
98
+
99
+ - name: Run tests
100
+ run: |
101
+ pytest -v -n8 --dist loadfile --ignore=tests/e2e/ --ignore=tests/patched/ --ignore=tests/cli/ tests/
102
+ pytest -v tests/patched/
103
+ pytest -v tests/cli/
104
+
105
+ - name: cleanup pip cache
106
+ run: |
107
+ find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
108
+
109
+ - name: Save HF cache
110
+ id: hf-cache
111
+ uses: actions/cache/save@v4
112
+ with:
113
+ path: |
114
+ /home/runner/.cache/huggingface/hub/datasets--*
115
+ /home/runner/.cache/huggingface/hub/models--*
116
+ key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
117
+
118
+ pytest-sdist:
119
+ name: PyTest from Source Dist
120
+ runs-on: ubuntu-latest
121
+ strategy:
122
+ fail-fast: false
123
+ max-parallel: 1
124
+ matrix:
125
+ python_version: ["3.11"]
126
+ pytorch_version: ["2.4.1", "2.5.1", "2.6.0"]
127
+ timeout-minutes: 20
128
+
129
+ steps:
130
+ - name: Check out repository code
131
+ uses: actions/checkout@v4
132
+
133
+ - name: Restore HF cache
134
+ id: hf-cache-restore
135
+ uses: actions/cache/restore@v4
136
+ with:
137
+ path: |
138
+ /home/runner/.cache/huggingface/hub/datasets--*
139
+ /home/runner/.cache/huggingface/hub/models--*
140
+ key: ${{ runner.os }}-hf-hub-cache-${{ hashFiles('**/conftest.py') }}
141
+
142
+ - name: Setup Python
143
+ uses: actions/setup-python@v5
144
+ with:
145
+ python-version: ${{ matrix.python_version }}
146
+ cache: 'pip' # caching pip dependencies
147
+
148
+ - name: upgrade pip
149
+ run: |
150
+ pip3 install --upgrade pip
151
+ pip3 install --upgrade packaging==23.2 setuptools==75.8.0 setuptools_scm build wheel
152
+
153
+ - name: Install PyTorch
154
+ run: |
155
+ pip3 install torch==${{ matrix.pytorch_version }}
156
+
157
+ - name: Install dependencies
158
+ run: |
159
+ pip3 show torch
160
+ python -m build --no-isolation --sdist
161
+ pip3 install --no-build-isolation dist/axolotl*.tar.gz
162
+ python scripts/unsloth_install.py | sh
163
+ python scripts/cutcrossentropy_install.py | sh
164
+ pip3 install -r requirements-dev.txt -r requirements-tests.txt
165
+
166
+ - name: Make sure PyTorch version wasn't clobbered
167
+ run: |
168
+ python -c "import torch; assert '${{ matrix.pytorch_version }}' in torch.__version__"
169
+
170
+ - name: Ensure axolotl CLI was installed
171
+ run: |
172
+ axolotl --help
173
+
174
+ - name: Run tests
175
+ run: |
176
+ pytest -v -n8 --dist loadfile --ignore=tests/e2e/ --ignore=tests/patched/ --ignore=tests/cli/ tests/
177
+ pytest -v tests/patched/
178
+ pytest -v tests/cli/
179
+
180
+ - name: cleanup pip cache
181
+ run: |
182
+ find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
183
+
184
+ - name: Save HF cache
185
+ id: hf-cache
186
+ uses: actions/cache/save@v4
187
+ with:
188
+ path: |
189
+ /home/runner/.cache/huggingface/hub/datasets--*
190
+ /home/runner/.cache/huggingface/hub/models--*
191
+ key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
192
+
193
+ docker-e2e-tests-1st:
194
+ if: ${{ ! contains(github.event.commits[0].message, '[skip e2e]') && github.repository_owner == 'axolotl-ai-cloud' }}
195
+ # this job needs to be run on self-hosted GPU runners...
196
+ runs-on: [self-hosted, modal]
197
+ timeout-minutes: 90
198
+ needs: [pre-commit, pytest, pytest-sdist]
199
+
200
+ strategy:
201
+ fail-fast: false
202
+ matrix:
203
+ include:
204
+ - cuda: 124
205
+ cuda_version: 12.4.1
206
+ python_version: "3.11"
207
+ pytorch: 2.5.1
208
+ num_gpus: 1
209
+ axolotl_extras: vllm
210
+ steps:
211
+ - name: Checkout
212
+ uses: actions/checkout@v4
213
+ - name: Install Python
214
+ uses: actions/setup-python@v5
215
+ with:
216
+ python-version: "3.11"
217
+ - name: Install Modal
218
+ run: |
219
+ python -m pip install --upgrade pip
220
+ pip install modal==0.71.8 jinja2
221
+ - name: Update env vars
222
+ run: |
223
+ echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
224
+ echo "PYTORCH_VERSION=${{ matrix.pytorch}}" >> $GITHUB_ENV
225
+ echo "AXOLOTL_ARGS=${{ matrix.axolotl_args}}" >> $GITHUB_ENV
226
+ echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
227
+ echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
228
+ echo "MODAL_IMAGE_BUILDER_VERSION=2024.10" >> $GITHUB_ENV
229
+ echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
230
+ - name: Run tests job on Modal
231
+ run: |
232
+ modal run cicd.tests
233
+
234
+ docker-e2e-tests:
235
+ if: github.repository_owner == 'axolotl-ai-cloud'
236
+ # this job needs to be run on self-hosted GPU runners...
237
+ runs-on: [self-hosted, modal]
238
+ timeout-minutes: 90
239
+ needs: [pre-commit, pytest, docker-e2e-tests-1st]
240
+
241
+ strategy:
242
+ fail-fast: false
243
+ matrix:
244
+ include:
245
+ - cuda: 124
246
+ cuda_version: 12.4.1
247
+ python_version: "3.11"
248
+ pytorch: 2.4.1
249
+ num_gpus: 1
250
+ axolotl_extras:
251
+ - cuda: 124
252
+ cuda_version: 12.4.1
253
+ python_version: "3.11"
254
+ pytorch: 2.6.0
255
+ num_gpus: 1
256
+ axolotl_extras:
257
+ steps:
258
+ - name: Checkout
259
+ uses: actions/checkout@v4
260
+ - name: Install Python
261
+ uses: actions/setup-python@v5
262
+ with:
263
+ python-version: "3.11"
264
+ - name: Install Modal
265
+ run: |
266
+ python -m pip install --upgrade pip
267
+ pip install modal==0.71.8 jinja2
268
+ - name: Update env vars
269
+ run: |
270
+ echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
271
+ echo "PYTORCH_VERSION=${{ matrix.pytorch}}" >> $GITHUB_ENV
272
+ echo "AXOLOTL_ARGS=${{ matrix.axolotl_args}}" >> $GITHUB_ENV
273
+ echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
274
+ echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
275
+ echo "MODAL_IMAGE_BUILDER_VERSION=2024.10" >> $GITHUB_ENV
276
+ echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
277
+ - name: Run tests job on Modal
278
+ run: |
279
+ modal run cicd.tests
.gitignore ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ **/axolotl.egg-info
2
+ configs
3
+ last_run_prepared/
4
+ outputs
5
+ .vscode
6
+ _site/
7
+
8
+ # Byte-compiled / optimized / DLL files
9
+ __pycache__/
10
+ *.py[cod]
11
+ *$py.class
12
+
13
+ # C extensions
14
+ *.so
15
+
16
+ # Distribution / packaging
17
+ .Python
18
+ build/
19
+ develop-eggs/
20
+ dist/
21
+ downloads/
22
+ eggs/
23
+ .eggs/
24
+ lib/
25
+ lib64/
26
+ parts/
27
+ sdist/
28
+ var/
29
+ wheels/
30
+ share/python-wheels/
31
+ *.egg-info/
32
+ .installed.cfg
33
+ *.egg
34
+ MANIFEST
35
+
36
+ # PyInstaller
37
+ # Usually these files are written by a python script from a template
38
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
39
+ *.manifest
40
+ *.spec
41
+
42
+ # Installer logs
43
+ pip-log.txt
44
+ pip-delete-this-directory.txt
45
+
46
+ # Unit test / coverage reports
47
+ htmlcov/
48
+ .tox/
49
+ .nox/
50
+ .coverage
51
+ .coverage.*
52
+ .cache
53
+ nosetests.xml
54
+ coverage.xml
55
+ *.cover
56
+ *.py,cover
57
+ .hypothesis/
58
+ .pytest_cache/
59
+ cover/
60
+
61
+ # Translations
62
+ *.mo
63
+ *.pot
64
+
65
+ # Django stuff:
66
+ *.log
67
+ local_settings.py
68
+ db.sqlite3
69
+ db.sqlite3-journal
70
+
71
+ # Flask stuff:
72
+ instance/
73
+ .webassets-cache
74
+
75
+ # Scrapy stuff:
76
+ .scrapy
77
+
78
+ # Sphinx documentation
79
+ docs/_build/
80
+
81
+ # PyBuilder
82
+ .pybuilder/
83
+ target/
84
+
85
+ # Jupyter Notebook
86
+ .ipynb_checkpoints
87
+
88
+ # IPython
89
+ profile_default/
90
+ ipython_config.py
91
+
92
+ # pyenv
93
+ # For a library or package, you might want to ignore these files since the code is
94
+ # intended to run in multiple environments; otherwise, check them in:
95
+ # .python-version
96
+
97
+ # pipenv
98
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
99
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
100
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
101
+ # install all needed dependencies.
102
+ #Pipfile.lock
103
+
104
+ # poetry
105
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
106
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
107
+ # commonly ignored for libraries.
108
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
109
+ #poetry.lock
110
+
111
+ # pdm
112
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
113
+ #pdm.lock
114
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
115
+ # in version control.
116
+ # https://pdm.fming.dev/#use-with-ide
117
+ .pdm.toml
118
+
119
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
120
+ __pypackages__/
121
+
122
+ # Celery stuff
123
+ celerybeat-schedule
124
+ celerybeat.pid
125
+
126
+ # SageMath parsed files
127
+ *.sage.py
128
+
129
+ # Environments
130
+ .env
131
+ .venv
132
+ env/
133
+ venv/
134
+ ENV/
135
+ env.bak/
136
+ venv.bak/
137
+ venv3.10/
138
+
139
+ # Spyder project settings
140
+ .spyderproject
141
+ .spyproject
142
+
143
+ # Rope project settings
144
+ .ropeproject
145
+
146
+ # mkdocs documentation
147
+ /site
148
+
149
+ # mypy
150
+ .mypy_cache/
151
+ .dmypy.json
152
+ dmypy.json
153
+
154
+ # Pyre type checker
155
+ .pyre/
156
+
157
+ # pytype static type analyzer
158
+ .pytype/
159
+
160
+ # Cython debug symbols
161
+ cython_debug/
162
+
163
+ # PyCharm
164
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
165
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
166
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
167
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
168
+ .idea/
169
+
170
+ # WandB
171
+ # wandb creates a folder to store logs for training runs
172
+ wandb
173
+
174
+ # Runs
175
+ lora-out/*
176
+ qlora-out/*
177
+ mlruns/*
178
+
179
+ /.quarto/
180
+ prepared-datasets/
181
+ submit.sh
182
+ *.out*
183
+
184
+ # Quartodoc generated files
185
+ objects.json
186
+ site_libs/
187
+
188
+ typings/
189
+ out/
190
+
191
+ # vim
192
+ *.swp
.isort.cfg ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [settings]
2
+ profile=black
3
+ known_third_party=wandb,comet_ml
.mypy.ini ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [mypy]
2
+ plugins = pydantic.mypy
3
+ exclude = venv
4
+
5
+ [mypy-alpaca_lora_4bit.*]
6
+ ignore_missing_imports = True
7
+
8
+ [mypy-axolotl.monkeypatch.*]
9
+ ignore_errors = True
10
+
11
+ [mypy-axolotl.models.mixtral.*]
12
+ ignore_errors = True
13
+
14
+ [mypy-axolotl.integrations.liger.models.*]
15
+ ignore_errors = True
16
+
17
+ [mypy-axolotl.models.phi.*]
18
+ ignore_errors = True
19
+
20
+ [mypy-flash_attn.*]
21
+ ignore_missing_imports = True
22
+
23
+ [mypy-huggingface_hub]
24
+ ignore_missing_imports = True
25
+
26
+ [mypy-transformers.*]
27
+ ignore_missing_imports = True
28
+
29
+ [mypy-peft]
30
+ ignore_missing_imports = True
31
+
32
+ [mypy-wandb]
33
+ ignore_missing_imports = True
34
+
35
+ [mypy-bitsandbytes]
36
+ ignore_missing_imports = True
37
+
38
+ [mypy-requests]
39
+ ignore_missing_imports = True
40
+
41
+ [mypy-datasets]
42
+ ignore_missing_imports = True
43
+
44
+ [mypy-fire]
45
+ ignore_missing_imports = True
46
+
47
+ [mypy-setuptools]
48
+ ignore_missing_imports = True
49
+
50
+ [mypy-addict]
51
+ ignore_missing_imports = True
52
+
53
+ [mypy-xformers.*]
54
+ ignore_missing_imports = True
.pre-commit-config.yaml ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ default_language_version:
2
+ python: python3
3
+
4
+ repos:
5
+ - repo: https://github.com/pre-commit/pre-commit-hooks
6
+ rev: v5.0.0
7
+ hooks:
8
+ - id: check-yaml
9
+ - id: end-of-file-fixer
10
+ - id: trailing-whitespace
11
+ - id: no-commit-to-branch
12
+ args: ['--branch', 'main']
13
+ - repo: https://github.com/psf/black
14
+ rev: 25.1.0
15
+ hooks:
16
+ - id: black
17
+ - repo: https://github.com/pycqa/isort
18
+ rev: 6.0.1
19
+ hooks:
20
+ - id: isort
21
+ - repo: https://github.com/PyCQA/flake8
22
+ rev: 7.1.2
23
+ hooks:
24
+ - id: flake8
25
+ - repo: https://github.com/pylint-dev/pylint
26
+ rev: v3.3.6
27
+ hooks:
28
+ - id: pylint
29
+ - repo: https://github.com/pre-commit/mirrors-mypy
30
+ rev: v1.15.0
31
+ hooks:
32
+ - id: mypy
33
+ additional_dependencies:
34
+ [
35
+ 'types-PyYAML',
36
+ 'pydantic>=2.5.3',
37
+ ]
38
+ - repo: https://github.com/PyCQA/bandit
39
+ rev: 1.8.3
40
+ hooks:
41
+ - id: bandit
42
+ args: [
43
+ '--ini',
44
+ '.bandit',
45
+ ]
.pylintrc ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [MASTER]
2
+ init-hook="from pylint.config import find_default_config_files; import sys; sys.path.append(next(find_default_config_files()).parent.as_posix())"
3
+
4
+ [TYPECHECK]
5
+
6
+ # List of members which are set dynamically and missed by Pylint inference
7
+ # system, and so shouldn't trigger E1101 when accessed.
8
+ generated-members=numpy.*, torch.*
9
+
10
+
11
+ [pylint.messages_control]
12
+ disable=missing-function-docstring, line-too-long, import-error,
13
+ too-many-arguments, too-many-locals, too-many-statements, too-many-branches, too-few-public-methods,
14
+ too-many-instance-attributes, fixme, import-outside-toplevel, logging-fstring-interpolation,
15
+ too-many-positional-arguments, possibly-used-before-assignment
FAQS.md ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # FAQs
2
+
3
+ - Can you train StableLM with this? Yes, but only with a single GPU atm. Multi GPU support is coming soon! Just waiting on this [PR](https://github.com/huggingface/transformers/pull/22874)
4
+ - Will this work with Deepspeed? That's still a WIP, but setting `export ACCELERATE_USE_DEEPSPEED=true` should work in some cases
5
+ - `Error invalid argument at line 359 in file /workspace/bitsandbytes/csrc/pythonInterface.c`
6
+ `/arrow/cpp/src/arrow/filesystem/s3fs.cc:2598: arrow::fs::FinalizeS3 was not called even though S3 was initialized.`
7
+ This could lead to a segmentation fault at exit. Try reinstalling bitsandbytes and transformers from source.
LICENSE ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ Apache License
3
+ Version 2.0, January 2004
4
+ http://www.apache.org/licenses/
5
+
6
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
+
8
+ 1. Definitions.
9
+
10
+ "License" shall mean the terms and conditions for use, reproduction,
11
+ and distribution as defined by Sections 1 through 9 of this document.
12
+
13
+ "Licensor" shall mean the copyright owner or entity authorized by
14
+ the copyright owner that is granting the License.
15
+
16
+ "Legal Entity" shall mean the union of the acting entity and all
17
+ other entities that control, are controlled by, or are under common
18
+ control with that entity. For the purposes of this definition,
19
+ "control" means (i) the power, direct or indirect, to cause the
20
+ direction or management of such entity, whether by contract or
21
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
22
+ outstanding shares, or (iii) beneficial ownership of such entity.
23
+
24
+ "You" (or "Your") shall mean an individual or Legal Entity
25
+ exercising permissions granted by this License.
26
+
27
+ "Source" form shall mean the preferred form for making modifications,
28
+ including but not limited to software source code, documentation
29
+ source, and configuration files.
30
+
31
+ "Object" form shall mean any form resulting from mechanical
32
+ transformation or translation of a Source form, including but
33
+ not limited to compiled object code, generated documentation,
34
+ and conversions to other media types.
35
+
36
+ "Work" shall mean the work of authorship, whether in Source or
37
+ Object form, made available under the License, as indicated by a
38
+ copyright notice that is included in or attached to the work
39
+ (an example is provided in the Appendix below).
40
+
41
+ "Derivative Works" shall mean any work, whether in Source or Object
42
+ form, that is based on (or derived from) the Work and for which the
43
+ editorial revisions, annotations, elaborations, or other modifications
44
+ represent, as a whole, an original work of authorship. For the purposes
45
+ of this License, Derivative Works shall not include works that remain
46
+ separable from, or merely link (or bind by name) to the interfaces of,
47
+ the Work and Derivative Works thereof.
48
+
49
+ "Contribution" shall mean any work of authorship, including
50
+ the original version of the Work and any modifications or additions
51
+ to that Work or Derivative Works thereof, that is intentionally
52
+ submitted to Licensor for inclusion in the Work by the copyright owner
53
+ or by an individual or Legal Entity authorized to submit on behalf of
54
+ the copyright owner. For the purposes of this definition, "submitted"
55
+ means any form of electronic, verbal, or written communication sent
56
+ to the Licensor or its representatives, including but not limited to
57
+ communication on electronic mailing lists, source code control systems,
58
+ and issue tracking systems that are managed by, or on behalf of, the
59
+ Licensor for the purpose of discussing and improving the Work, but
60
+ excluding communication that is conspicuously marked or otherwise
61
+ designated in writing by the copyright owner as "Not a Contribution."
62
+
63
+ "Contributor" shall mean Licensor and any individual or Legal Entity
64
+ on behalf of whom a Contribution has been received by Licensor and
65
+ subsequently incorporated within the Work.
66
+
67
+ 2. Grant of Copyright License. Subject to the terms and conditions of
68
+ this License, each Contributor hereby grants to You a perpetual,
69
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70
+ copyright license to reproduce, prepare Derivative Works of,
71
+ publicly display, publicly perform, sublicense, and distribute the
72
+ Work and such Derivative Works in Source or Object form.
73
+
74
+ 3. Grant of Patent License. Subject to the terms and conditions of
75
+ this License, each Contributor hereby grants to You a perpetual,
76
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77
+ (except as stated in this section) patent license to make, have made,
78
+ use, offer to sell, sell, import, and otherwise transfer the Work,
79
+ where such license applies only to those patent claims licensable
80
+ by such Contributor that are necessarily infringed by their
81
+ Contribution(s) alone or by combination of their Contribution(s)
82
+ with the Work to which such Contribution(s) was submitted. If You
83
+ institute patent litigation against any entity (including a
84
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
85
+ or a Contribution incorporated within the Work constitutes direct
86
+ or contributory patent infringement, then any patent licenses
87
+ granted to You under this License for that Work shall terminate
88
+ as of the date such litigation is filed.
89
+
90
+ 4. Redistribution. You may reproduce and distribute copies of the
91
+ Work or Derivative Works thereof in any medium, with or without
92
+ modifications, and in Source or Object form, provided that You
93
+ meet the following conditions:
94
+
95
+ (a) You must give any other recipients of the Work or
96
+ Derivative Works a copy of this License; and
97
+
98
+ (b) You must cause any modified files to carry prominent notices
99
+ stating that You changed the files; and
100
+
101
+ (c) You must retain, in the Source form of any Derivative Works
102
+ that You distribute, all copyright, patent, trademark, and
103
+ attribution notices from the Source form of the Work,
104
+ excluding those notices that do not pertain to any part of
105
+ the Derivative Works; and
106
+
107
+ (d) If the Work includes a "NOTICE" text file as part of its
108
+ distribution, then any Derivative Works that You distribute must
109
+ include a readable copy of the attribution notices contained
110
+ within such NOTICE file, excluding those notices that do not
111
+ pertain to any part of the Derivative Works, in at least one
112
+ of the following places: within a NOTICE text file distributed
113
+ as part of the Derivative Works; within the Source form or
114
+ documentation, if provided along with the Derivative Works; or,
115
+ within a display generated by the Derivative Works, if and
116
+ wherever such third-party notices normally appear. The contents
117
+ of the NOTICE file are for informational purposes only and
118
+ do not modify the License. You may add Your own attribution
119
+ notices within Derivative Works that You distribute, alongside
120
+ or as an addendum to the NOTICE text from the Work, provided
121
+ that such additional attribution notices cannot be construed
122
+ as modifying the License.
123
+
124
+ You may add Your own copyright statement to Your modifications and
125
+ may provide additional or different license terms and conditions
126
+ for use, reproduction, or distribution of Your modifications, or
127
+ for any such Derivative Works as a whole, provided Your use,
128
+ reproduction, and distribution of the Work otherwise complies with
129
+ the conditions stated in this License.
130
+
131
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
132
+ any Contribution intentionally submitted for inclusion in the Work
133
+ by You to the Licensor shall be under the terms and conditions of
134
+ this License, without any additional terms or conditions.
135
+ Notwithstanding the above, nothing herein shall supersede or modify
136
+ the terms of any separate license agreement you may have executed
137
+ with Licensor regarding such Contributions.
138
+
139
+ 6. Trademarks. This License does not grant permission to use the trade
140
+ names, trademarks, service marks, or product names of the Licensor,
141
+ except as required for reasonable and customary use in describing the
142
+ origin of the Work and reproducing the content of the NOTICE file.
143
+
144
+ 7. Disclaimer of Warranty. Unless required by applicable law or
145
+ agreed to in writing, Licensor provides the Work (and each
146
+ Contributor provides its Contributions) on an "AS IS" BASIS,
147
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148
+ implied, including, without limitation, any warranties or conditions
149
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150
+ PARTICULAR PURPOSE. You are solely responsible for determining the
151
+ appropriateness of using or redistributing the Work and assume any
152
+ risks associated with Your exercise of permissions under this License.
153
+
154
+ 8. Limitation of Liability. In no event and under no legal theory,
155
+ whether in tort (including negligence), contract, or otherwise,
156
+ unless required by applicable law (such as deliberate and grossly
157
+ negligent acts) or agreed to in writing, shall any Contributor be
158
+ liable to You for damages, including any direct, indirect, special,
159
+ incidental, or consequential damages of any character arising as a
160
+ result of this License or out of the use or inability to use the
161
+ Work (including but not limited to damages for loss of goodwill,
162
+ work stoppage, computer failure or malfunction, or any and all
163
+ other commercial damages or losses), even if such Contributor
164
+ has been advised of the possibility of such damages.
165
+
166
+ 9. Accepting Warranty or Additional Liability. While redistributing
167
+ the Work or Derivative Works thereof, You may choose to offer,
168
+ and charge a fee for, acceptance of support, warranty, indemnity,
169
+ or other liability obligations and/or rights consistent with this
170
+ License. However, in accepting such obligations, You may act only
171
+ on Your own behalf and on Your sole responsibility, not on behalf
172
+ of any other Contributor, and only if You agree to indemnify,
173
+ defend, and hold each Contributor harmless for any liability
174
+ incurred by, or claims asserted against, such Contributor by reason
175
+ of your accepting any such warranty or additional liability.
176
+
177
+ END OF TERMS AND CONDITIONS
178
+
179
+ APPENDIX: How to apply the Apache License to your work.
180
+
181
+ To apply the Apache License to your work, attach the following
182
+ boilerplate notice, with the fields enclosed by brackets "[]"
183
+ replaced with your own identifying information. (Don't include
184
+ the brackets!) The text should be enclosed in the appropriate
185
+ comment syntax for the file format. We also recommend that a
186
+ file or class name and description of purpose be included on the
187
+ same "printed page" as the copyright notice for easier
188
+ identification within third-party archives.
189
+
190
+ Copyright [yyyy] [name of copyright owner]
191
+
192
+ Licensed under the Apache License, Version 2.0 (the "License");
193
+ you may not use this file except in compliance with the License.
194
+ You may obtain a copy of the License at
195
+
196
+ http://www.apache.org/licenses/LICENSE-2.0
197
+
198
+ Unless required by applicable law or agreed to in writing, software
199
+ distributed under the License is distributed on an "AS IS" BASIS,
200
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201
+ See the License for the specific language governing permissions and
202
+ limitations under the License.
MANIFEST.in ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ include requirements.txt
2
+ include README.md
3
+ include LICENSE
4
+ include src/setuptools_axolotl_dynamic_dependencies.py
5
+ recursive-include axolotl *.py
README.md CHANGED
@@ -1,58 +1,5 @@
1
- ---
2
- base_model: google/gemma-3-1b-it
3
- library_name: transformers
4
- model_name: finetune-demo-lora
5
- tags:
6
- - generated_from_trainer
7
- - trl
8
- - sft
9
- licence: license
10
- ---
11
 
12
- # Model Card for finetune-demo-lora
13
 
14
- This model is a fine-tuned version of [google/gemma-3-1b-it](https://huggingface.co/google/gemma-3-1b-it).
15
- It has been trained using [TRL](https://github.com/huggingface/trl).
16
-
17
- ## Quick start
18
-
19
- ```python
20
- from transformers import pipeline
21
-
22
- question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
23
- generator = pipeline("text-generation", model="rayraycano/finetune-demo-lora", device="cuda")
24
- output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
25
- print(output["generated_text"])
26
- ```
27
-
28
- ## Training procedure
29
-
30
- [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/philipkiely-baseten/baseten-finetune-demo/runs/2tohi2fa)
31
-
32
-
33
- This model was trained with SFT.
34
-
35
- ### Framework versions
36
-
37
- - TRL: 0.15.1
38
- - Transformers: 4.50.0
39
- - Pytorch: 2.6.0+cu124
40
- - Datasets: 3.4.1
41
- - Tokenizers: 0.21.1
42
-
43
- ## Citations
44
-
45
-
46
-
47
- Cite TRL as:
48
-
49
- ```bibtex
50
- @misc{vonwerra2022trl,
51
- title = {{TRL: Transformer Reinforcement Learning}},
52
- author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
53
- year = 2020,
54
- journal = {GitHub repository},
55
- publisher = {GitHub},
56
- howpublished = {\url{https://github.com/huggingface/trl}}
57
- }
58
- ```
 
1
+ # Baseten Training Demo
 
 
 
 
 
 
 
 
 
2
 
3
+ This is a demo of how to finetune a model using Baseten Training.
4
 
5
+ See the [Get Started](https://docs.baseten.com/get-started) guide to learn more about how to use Baseten Training.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
TODO.md ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # todo list
2
+
3
+ - [] Validation of parameters for combinations that won't work
4
+
5
+
6
+
7
+ ## things that are known not to work
8
+
9
+ - FSDP offload and gradient_checkpointing - https://github.com/pytorch/pytorch/issues/82203
10
+ - adamw_bnb_8bit doesn't play well with FSDP offload
adapter_config.json CHANGED
@@ -24,13 +24,13 @@
24
  "rank_pattern": {},
25
  "revision": null,
26
  "target_modules": [
27
- "o_proj",
28
- "gate_proj",
29
- "down_proj",
30
  "q_proj",
31
  "v_proj",
32
- "k_proj",
33
- "up_proj"
 
34
  ],
35
  "task_type": "CAUSAL_LM",
36
  "trainable_token_indices": null,
 
24
  "rank_pattern": {},
25
  "revision": null,
26
  "target_modules": [
27
+ "k_proj",
28
+ "up_proj",
 
29
  "q_proj",
30
  "v_proj",
31
+ "o_proj",
32
+ "gate_proj",
33
+ "down_proj"
34
  ],
35
  "task_type": "CAUSAL_LM",
36
  "trainable_token_indices": null,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b938dfd2d98dfdc9e091d12591b2efcf79364ebc7da12fc49f31f4ec44390e14
3
  size 113632496
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f071151423b65de415efa62ec47dc39d7072ce7e56eab9c173f884ab2c52da37
3
  size 113632496
checkpointing_lws.yml ADDED
@@ -0,0 +1,650 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: leaderworkerset.x-k8s.io/v1
2
+ kind: LeaderWorkerSet
3
+ metadata:
4
+ creationTimestamp: "2025-06-17T22:08:13Z"
5
+ generation: 1
6
+ labels:
7
+ app: baseten-training-job-vq0521w
8
+ baseten.co/job: "true"
9
+ domain: user
10
+ environment: production
11
+ instance_type: H100__2x44x240
12
+ org.identifier: raymond--cano--baseten--co
13
+ org.primary_key: B5W5OLP
14
+ training_job.id: vq0521w
15
+ training_project.id: jwdgnwk
16
+ name: baseten-training-job-vq0521w-multinode
17
+ namespace: org-9914a591b6e04ff7848a21ae64fa3398
18
+ resourceVersion: "236739712"
19
+ uid: fe987c78-34c1-44da-8b0f-64074e101724
20
+ spec:
21
+ leaderWorkerTemplate:
22
+ leaderTemplate:
23
+ metadata:
24
+ labels:
25
+ app: baseten-training-job-vq0521w
26
+ baseten.co/job: "true"
27
+ baseten.co/job_type: training
28
+ domain: user
29
+ environment: production
30
+ instance_type: H100__2x44x240
31
+ org.identifier: raymond--cano--baseten--co
32
+ org.primary_key: B5W5OLP
33
+ training_job.id: vq0521w
34
+ training_project.id: jwdgnwk
35
+ name: baseten-training-job-vq0521w
36
+ namespace: org-9914a591b6e04ff7848a21ae64fa3398
37
+ spec:
38
+ affinity:
39
+ nodeAffinity:
40
+ preferredDuringSchedulingIgnoredDuringExecution:
41
+ - preference:
42
+ matchExpressions:
43
+ - key: baseten-internal/workload-type
44
+ operator: In
45
+ values:
46
+ - training
47
+ weight: 100
48
+ - preference:
49
+ matchExpressions:
50
+ - key: baseten-internal/customer-reservation
51
+ operator: Exists
52
+ weight: 90
53
+ automountServiceAccountToken: false
54
+ containers:
55
+ - command:
56
+ - sh
57
+ - -c
58
+ - |-
59
+ # NB(nikhil): We prevent excessive restarts:
60
+ # 1) If the job completed successfully, we use a sentinel file to sleep on next start
61
+ # 2) If the job is crashing, we use an attempt counter to track if we should start again
62
+ BT_RETRY_COUNT="${BT_RETRY_COUNT:-1}"
63
+ ATTEMPT_COUNT_FILE="/bt_state/attempt_count.txt"
64
+
65
+
66
+ if [ -f "/bt_state/completed.txt" ]; then
67
+ echo "Job already completed successfully, sleeping..."
68
+ sleep infinity
69
+ fi
70
+
71
+ attempt_count=0
72
+ if [ -f "$ATTEMPT_COUNT_FILE" ]; then
73
+ attempt_count="$(cat "$ATTEMPT_COUNT_FILE")"
74
+ fi
75
+
76
+ if [ "$attempt_count" -ge "$BT_RETRY_COUNT" ]; then
77
+ echo "Exceeded ${BT_RETRY_COUNT} retry limit with ${attempt_count} attempts, sleeping..."
78
+ sleep infinity
79
+ fi
80
+
81
+ # Increment attempt count across runs
82
+ attempt_count=$((attempt_count + 1))
83
+ echo "$attempt_count" > "$ATTEMPT_COUNT_FILE"
84
+
85
+ echo "Copying artifacts to ($PWD)"
86
+ cp -r /b10/workspace/* ./
87
+
88
+ EXIT_CODE=0
89
+ if [ "$EXIT_CODE" -eq 0 ]; then
90
+ /bin/sh -c './run.sh' || EXIT_CODE=$?
91
+ fi
92
+ completed_date=$(date +%s)
93
+ echo "Job has exited. Syncing checkpoints..."
94
+ while true; do
95
+ if [ -f "/bt_state/sync_start.txt" ]; then
96
+ sync_start_date=$(cat "/bt_state/sync_start.txt")
97
+ if [ "$sync_start_date" -gt "$completed_date" ]; then
98
+ echo "Checkpoint sync completed."
99
+ break
100
+ fi
101
+ fi
102
+ sleep 10
103
+ done
104
+
105
+ if [ "$EXIT_CODE" -eq 0 ]; then
106
+ touch "/bt_state/completed.txt"
107
+ fi
108
+
109
+ exit "$EXIT_CODE"
110
+ env:
111
+ - name: PYTHONUNBUFFERED
112
+ value: "1"
113
+ - name: IS_LOCAL_DEV
114
+ value: "False"
115
+ - name: BT_TRAINING_JOB_ID
116
+ value: vq0521w
117
+ - name: BT_RETRY_COUNT
118
+ value: "1"
119
+ - name: BASE_MODEL_ID
120
+ value: google/gemma-3-27b-it
121
+ - name: OUTPUT_LORA_REPO_ID
122
+ value: rayraycano/finetune-demo-lora
123
+ - name: HF_TOKEN
124
+ valueFrom:
125
+ secretKeyRef:
126
+ key: hf_access_token
127
+ name: baseten-oracles-secrets
128
+ - name: WANDB_API_KEY
129
+ valueFrom:
130
+ secretKeyRef:
131
+ key: wandb_api_key
132
+ name: baseten-oracles-secrets
133
+ - name: BT_NODE_RANK
134
+ valueFrom:
135
+ fieldRef:
136
+ fieldPath: metadata.labels['leaderworkerset.sigs.k8s.io/worker-index']
137
+ - name: BT_NUM_GPUS
138
+ value: "2"
139
+ - name: BT_LEADER_ADDR
140
+ value: $(LWS_LEADER_ADDRESS)
141
+ - name: BT_GROUP_SIZE
142
+ value: $(LWS_GROUP_SIZE)
143
+ - name: BT_RW_CACHE_DIR
144
+ value: /root/.cache/user_artifacts
145
+ - name: BT_CHECKPOINT_DIR
146
+ value: /mnt/ckpts
147
+ image: axolotlai/axolotl:main-20250324-py3.11-cu124-2.6.0
148
+ name: training-job-container
149
+ resources:
150
+ limits:
151
+ cpu: 43550m
152
+ ephemeral-storage: 800Gi
153
+ memory: 240354Mi
154
+ nvidia.com/gpu: "2"
155
+ requests:
156
+ cpu: 42800m
157
+ ephemeral-storage: 800Gi
158
+ memory: 239354Mi
159
+ nvidia.com/gpu: "2"
160
+ securityContext:
161
+ capabilities:
162
+ add:
163
+ - SYS_ADMIN
164
+ drop:
165
+ - CAP_NET_RAW
166
+ - CAP_NET_ADMIN
167
+ volumeMounts:
168
+ - mountPath: /lib/modules
169
+ name: host-modules
170
+ readOnly: true
171
+ - mountPath: /dev/fuse
172
+ name: fuse
173
+ - mountPath: /dev/shm
174
+ name: shm
175
+ - mountPath: /secrets
176
+ name: secrets
177
+ readOnly: true
178
+ - mountPath: /bt_state
179
+ name: bt-state
180
+ - mountPath: /b10/workspace
181
+ name: runtime-artifacts
182
+ readOnly: true
183
+ - mountPath: /root/.cache/user_artifacts
184
+ name: baseten-fs-rw-cache
185
+ subPath: jwdgnwk/user_artifacts
186
+ - mountPath: /root/.cache/huggingface
187
+ name: baseten-fs-rw-cache
188
+ subPath: jwdgnwk/huggingface
189
+ - mountPath: /mnt/ckpts
190
+ name: ckpt-shared-storage
191
+ - command:
192
+ - /bin/sh
193
+ - -c
194
+ - |
195
+ while [ ! -f "/bt_state/completed.txt" ]; do
196
+ start_time=$(date +%s)
197
+ # We reference these secrets explicitly because they might rotate during the job
198
+ export AWS_ACCESS_KEY_ID=$(cat /aws-secrets/aws_access_key_id)
199
+ export AWS_SECRET_ACCESS_KEY=$(cat /aws-secrets/aws_secret_access_key)
200
+ export AWS_SESSION_TOKEN=$(cat /aws-secrets/aws_session_token)
201
+ # dry run to provide visibility into what will be synced. This can be checked to verify that we aren't making duplicate syncs.
202
+ echo "======= Dry run to provide visibility into what will be synced ======="
203
+ aws s3 sync /mnt/ckpts s3://bt-training-b5w5olp-8de8b19b-6cba-465a-a5fe-d6e16b627cc5/training_projects/jwdgnwk/jobs/vq0521w/rank-$BT_NODE_RANK/ --dryrun
204
+ echo "======= Syncing checkpoints to S3 ======="
205
+ aws s3 sync /mnt/ckpts s3://bt-training-b5w5olp-8de8b19b-6cba-465a-a5fe-d6e16b627cc5/training_projects/jwdgnwk/jobs/vq0521w/rank-$BT_NODE_RANK/
206
+ # write the start time only after the sync finishes. This helps ensure that we complete the sync
207
+ # that starts after the job has completed.
208
+ echo "$start_time" > "/bt_state/sync_start.txt"
209
+ sleep 30
210
+ done
211
+ exit 0
212
+ env:
213
+ - name: AWS_DEFAULT_REGION
214
+ value: us-west-2
215
+ - name: BT_NODE_RANK
216
+ valueFrom:
217
+ fieldRef:
218
+ fieldPath: metadata.labels['leaderworkerset.sigs.k8s.io/worker-index']
219
+ image: amazon/aws-cli:2.13.25
220
+ name: s3-sync-sidecar
221
+ resources:
222
+ limits:
223
+ cpu: 200m
224
+ memory: 512Mi
225
+ requests:
226
+ cpu: 100m
227
+ memory: 256Mi
228
+ volumeMounts:
229
+ - mountPath: /mnt/ckpts
230
+ name: ckpt-shared-storage
231
+ - mountPath: /bt_state
232
+ name: bt-state
233
+ - mountPath: /aws-secrets
234
+ name: aws-secrets
235
+ readOnly: true
236
+ initContainers:
237
+ - command:
238
+ - sh
239
+ - -c
240
+ - |
241
+ set -e
242
+ apk add --no-cache wget
243
+ echo "Downloading artifact 1"
244
+ wget -q "https://baseten-user-models-2971lo7k.s3.amazonaws.com/organizations/B5W5OLP/training/bea8e66c-d732-4b8b-b265-18c0bbfa506f/scripts.tgz?AWSAccessKeyId=AKIA4FWSEGGQU5RCDCKX&Signature=8xoXJZmRxjwXXJPh4FTLN17Wv%2BY%3D&Expires=1750201691" -O artifact-1.tar
245
+ echo "Extracting artifact 1"
246
+ tar -xvf artifact-1.tar -C /b10/workspace
247
+ rm artifact-1.tar
248
+ image: alpine:3.21.3
249
+ imagePullPolicy: IfNotPresent
250
+ name: fetch-runtime-artifacts
251
+ resources: {}
252
+ volumeMounts:
253
+ - mountPath: /b10/workspace
254
+ name: runtime-artifacts
255
+ - command:
256
+ - sh
257
+ - -c
258
+ - |
259
+ set -e
260
+ echo "Mounting RW cache"
261
+ cd /mnt
262
+ mkdir -p jwdgnwk/user_artifacts
263
+ mkdir -p jwdgnwk/huggingface
264
+ image: alpine:3.21.3
265
+ imagePullPolicy: IfNotPresent
266
+ name: rw-cache-init-container
267
+ resources: {}
268
+ volumeMounts:
269
+ - mountPath: /mnt
270
+ name: baseten-fs-rw-cache
271
+ nodeSelector:
272
+ baseten.co/gpu-type: nvidia-h100-80gb
273
+ nvidia.com/gpu.product: NVIDIA-H100-80GB-HBM3
274
+ priorityClassName: user-model-priority
275
+ serviceAccountName: baseten-user
276
+ tolerations:
277
+ - effect: NoSchedule
278
+ key: baseten-internal/customer-reservation
279
+ operator: Equal
280
+ value: raymond--cano--baseten--co
281
+ - effect: NoSchedule
282
+ key: baseten-internal/workload-type
283
+ operator: Equal
284
+ value: training
285
+ volumes:
286
+ - emptyDir:
287
+ medium: Memory
288
+ sizeLimit: 128Gi
289
+ name: shm
290
+ - name: secrets
291
+ secret:
292
+ secretName: baseten-oracles-secrets
293
+ - name: aws-secrets
294
+ secret:
295
+ secretName: baseten-training-checkpointing-secrets
296
+ - hostPath:
297
+ path: /lib/modules
298
+ type: Directory
299
+ name: host-modules
300
+ - hostPath:
301
+ path: /dev/fuse
302
+ name: fuse
303
+ - emptyDir:
304
+ medium: Memory
305
+ sizeLimit: 1Mi
306
+ name: bt-state
307
+ - emptyDir:
308
+ sizeLimit: 15Gi
309
+ name: runtime-artifacts
310
+ - name: baseten-fs-rw-cache
311
+ persistentVolumeClaim:
312
+ claimName: org-9914a591b6e04ff7848a21ae64fa3398-training-fs-pvc
313
+ - ephemeral:
314
+ volumeClaimTemplate:
315
+ metadata: {}
316
+ spec:
317
+ accessModes:
318
+ - ReadWriteOnce
319
+ resources:
320
+ requests:
321
+ storage: 1000Gi
322
+ name: ckpt-shared-storage
323
+ restartPolicy: None
324
+ size: 1
325
+ workerTemplate:
326
+ metadata:
327
+ labels:
328
+ app: baseten-training-job-vq0521w
329
+ baseten.co/job: "true"
330
+ baseten.co/job_type: training
331
+ domain: user
332
+ environment: production
333
+ instance_type: H100__2x44x240
334
+ org.identifier: raymond--cano--baseten--co
335
+ org.primary_key: B5W5OLP
336
+ training_job.id: vq0521w
337
+ training_project.id: jwdgnwk
338
+ name: baseten-training-job-vq0521w
339
+ namespace: org-9914a591b6e04ff7848a21ae64fa3398
340
+ spec:
341
+ affinity:
342
+ nodeAffinity:
343
+ preferredDuringSchedulingIgnoredDuringExecution:
344
+ - preference:
345
+ matchExpressions:
346
+ - key: baseten-internal/workload-type
347
+ operator: In
348
+ values:
349
+ - training
350
+ weight: 100
351
+ - preference:
352
+ matchExpressions:
353
+ - key: baseten-internal/customer-reservation
354
+ operator: Exists
355
+ weight: 90
356
+ automountServiceAccountToken: false
357
+ containers:
358
+ - command:
359
+ - sh
360
+ - -c
361
+ - |-
362
+ # NB(nikhil): We prevent excessive restarts:
363
+ # 1) If the job completed successfully, we use a sentinel file to sleep on next start
364
+ # 2) If the job is crashing, we use an attempt counter to track if we should start again
365
+ BT_RETRY_COUNT="${BT_RETRY_COUNT:-1}"
366
+ ATTEMPT_COUNT_FILE="/bt_state/attempt_count.txt"
367
+
368
+
369
+ if [ -f "/bt_state/completed.txt" ]; then
370
+ echo "Job already completed successfully, sleeping..."
371
+ sleep infinity
372
+ fi
373
+
374
+ attempt_count=0
375
+ if [ -f "$ATTEMPT_COUNT_FILE" ]; then
376
+ attempt_count="$(cat "$ATTEMPT_COUNT_FILE")"
377
+ fi
378
+
379
+ if [ "$attempt_count" -ge "$BT_RETRY_COUNT" ]; then
380
+ echo "Exceeded ${BT_RETRY_COUNT} retry limit with ${attempt_count} attempts, sleeping..."
381
+ sleep infinity
382
+ fi
383
+
384
+ # Increment attempt count across runs
385
+ attempt_count=$((attempt_count + 1))
386
+ echo "$attempt_count" > "$ATTEMPT_COUNT_FILE"
387
+
388
+ echo "Copying artifacts to ($PWD)"
389
+ cp -r /b10/workspace/* ./
390
+
391
+ EXIT_CODE=0
392
+ if [ "$EXIT_CODE" -eq 0 ]; then
393
+ /bin/sh -c './run.sh' || EXIT_CODE=$?
394
+ fi
395
+ completed_date=$(date +%s)
396
+ echo "Job has exited. Syncing checkpoints..."
397
+ while true; do
398
+ if [ -f "/bt_state/sync_start.txt" ]; then
399
+ sync_start_date=$(cat "/bt_state/sync_start.txt")
400
+ if [ "$sync_start_date" -gt "$completed_date" ]; then
401
+ echo "Checkpoint sync completed."
402
+ break
403
+ fi
404
+ fi
405
+ sleep 10
406
+ done
407
+
408
+ if [ "$EXIT_CODE" -eq 0 ]; then
409
+ touch "/bt_state/completed.txt"
410
+ fi
411
+
412
+ exit "$EXIT_CODE"
413
+ env:
414
+ - name: PYTHONUNBUFFERED
415
+ value: "1"
416
+ - name: IS_LOCAL_DEV
417
+ value: "False"
418
+ - name: BT_TRAINING_JOB_ID
419
+ value: vq0521w
420
+ - name: BT_RETRY_COUNT
421
+ value: "1"
422
+ - name: BASE_MODEL_ID
423
+ value: google/gemma-3-27b-it
424
+ - name: OUTPUT_LORA_REPO_ID
425
+ value: rayraycano/finetune-demo-lora
426
+ - name: HF_TOKEN
427
+ valueFrom:
428
+ secretKeyRef:
429
+ key: hf_access_token
430
+ name: baseten-oracles-secrets
431
+ - name: WANDB_API_KEY
432
+ valueFrom:
433
+ secretKeyRef:
434
+ key: wandb_api_key
435
+ name: baseten-oracles-secrets
436
+ - name: BT_NODE_RANK
437
+ valueFrom:
438
+ fieldRef:
439
+ fieldPath: metadata.labels['leaderworkerset.sigs.k8s.io/worker-index']
440
+ - name: BT_NUM_GPUS
441
+ value: "2"
442
+ - name: BT_LEADER_ADDR
443
+ value: $(LWS_LEADER_ADDRESS)
444
+ - name: BT_GROUP_SIZE
445
+ value: $(LWS_GROUP_SIZE)
446
+ - name: BT_RW_CACHE_DIR
447
+ value: /root/.cache/user_artifacts
448
+ - name: BT_CHECKPOINT_DIR
449
+ value: /mnt/ckpts
450
+ image: axolotlai/axolotl:main-20250324-py3.11-cu124-2.6.0
451
+ name: training-job-container
452
+ resources:
453
+ limits:
454
+ cpu: 43550m
455
+ ephemeral-storage: 800Gi
456
+ memory: 240354Mi
457
+ nvidia.com/gpu: "2"
458
+ requests:
459
+ cpu: 42800m
460
+ ephemeral-storage: 800Gi
461
+ memory: 239354Mi
462
+ nvidia.com/gpu: "2"
463
+ securityContext:
464
+ capabilities:
465
+ add:
466
+ - SYS_ADMIN
467
+ drop:
468
+ - CAP_NET_RAW
469
+ - CAP_NET_ADMIN
470
+ volumeMounts:
471
+ - mountPath: /lib/modules
472
+ name: host-modules
473
+ readOnly: true
474
+ - mountPath: /dev/fuse
475
+ name: fuse
476
+ - mountPath: /dev/shm
477
+ name: shm
478
+ - mountPath: /secrets
479
+ name: secrets
480
+ readOnly: true
481
+ - mountPath: /bt_state
482
+ name: bt-state
483
+ - mountPath: /b10/workspace
484
+ name: runtime-artifacts
485
+ readOnly: true
486
+ - mountPath: /root/.cache/user_artifacts
487
+ name: baseten-fs-rw-cache
488
+ subPath: jwdgnwk/user_artifacts
489
+ - mountPath: /root/.cache/huggingface
490
+ name: baseten-fs-rw-cache
491
+ subPath: jwdgnwk/huggingface
492
+ - mountPath: /mnt/ckpts
493
+ name: ckpt-shared-storage
494
+ - command:
495
+ - /bin/sh
496
+ - -c
497
+ - |-
498
+ while [ ! -f "/bt_state/completed.txt" ]; do
499
+ start_time=$(date +%s)
500
+ # We reference these secrets explicitly because they might rotate during the job
501
+ export AWS_ACCESS_KEY_ID=$(cat /aws-secrets/aws_access_key_id)
502
+ export AWS_SECRET_ACCESS_KEY=$(cat /aws-secrets/aws_secret_access_key)
503
+ export AWS_SESSION_TOKEN=$(cat /aws-secrets/aws_session_token)
504
+ # dry run to provide visibility into what will be synced. This can be checked to verify that we aren't making duplicate syncs.
505
+ echo "======= Dry run to provide visibility into what will be synced ======="
506
+ aws s3 sync /mnt/ckpts s3://bt-training-b5w5olp-8de8b19b-6cba-465a-a5fe-d6e16b627cc5/training_projects/jwdgnwk/jobs/vq0521w/rank-$BT_NODE_RANK/ --dryrun
507
+ echo "======= Syncing checkpoints to S3 ======="
508
+ aws s3 sync /mnt/ckpts s3://bt-training-b5w5olp-8de8b19b-6cba-465a-a5fe-d6e16b627cc5/training_projects/jwdgnwk/jobs/vq0521w/rank-$BT_NODE_RANK/
509
+ # write the start time only after the sync finishes. This helps ensure that we complete the sync
510
+ # that starts after the job has completed.
511
+ echo "$start_time" > "/bt_state/sync_start.txt"
512
+ sleep 30
513
+ done
514
+ exit 0
515
+ env:
516
+ - name: AWS_DEFAULT_REGION
517
+ value: us-west-2
518
+ - name: BT_NODE_RANK
519
+ valueFrom:
520
+ fieldRef:
521
+ fieldPath: metadata.labels['leaderworkerset.sigs.k8s.io/worker-index']
522
+ image: amazon/aws-cli:2.13.25
523
+ name: s3-sync-sidecar
524
+ resources:
525
+ limits:
526
+ cpu: 200m
527
+ memory: 512Mi
528
+ requests:
529
+ cpu: 100m
530
+ memory: 256Mi
531
+ volumeMounts:
532
+ - mountPath: /mnt/ckpts
533
+ name: ckpt-shared-storage
534
+ - mountPath: /bt_state
535
+ name: bt-state
536
+ - mountPath: /aws-secrets
537
+ name: aws-secrets
538
+ readOnly: true
539
+ initContainers:
540
+ - command:
541
+ - sh
542
+ - -c
543
+ - |
544
+ set -e
545
+ apk add --no-cache wget
546
+ echo "Downloading artifact 1"
547
+ wget -q "https://baseten-user-models-2971lo7k.s3.amazonaws.com/organizations/B5W5OLP/training/bea8e66c-d732-4b8b-b265-18c0bbfa506f/scripts.tgz?AWSAccessKeyId=AKIA4FWSEGGQU5RCDCKX&Signature=8xoXJZmRxjwXXJPh4FTLN17Wv%2BY%3D&Expires=1750201691" -O artifact-1.tar
548
+ echo "Extracting artifact 1"
549
+ tar -xvf artifact-1.tar -C /b10/workspace
550
+ rm artifact-1.tar
551
+ image: alpine:3.21.3
552
+ imagePullPolicy: IfNotPresent
553
+ name: fetch-runtime-artifacts
554
+ resources: {}
555
+ volumeMounts:
556
+ - mountPath: /b10/workspace
557
+ name: runtime-artifacts
558
+ - command:
559
+ - sh
560
+ - -c
561
+ - |
562
+ set -e
563
+ echo "Mounting RW cache"
564
+ cd /mnt
565
+ mkdir -p jwdgnwk/user_artifacts
566
+ mkdir -p jwdgnwk/huggingface
567
+ image: alpine:3.21.3
568
+ imagePullPolicy: IfNotPresent
569
+ name: rw-cache-init-container
570
+ resources: {}
571
+ volumeMounts:
572
+ - mountPath: /mnt
573
+ name: baseten-fs-rw-cache
574
+ nodeSelector:
575
+ baseten.co/gpu-type: nvidia-h100-80gb
576
+ nvidia.com/gpu.product: NVIDIA-H100-80GB-HBM3
577
+ priorityClassName: user-model-priority
578
+ serviceAccountName: baseten-user
579
+ tolerations:
580
+ - effect: NoSchedule
581
+ key: baseten-internal/customer-reservation
582
+ operator: Equal
583
+ value: raymond--cano--baseten--co
584
+ - effect: NoSchedule
585
+ key: baseten-internal/workload-type
586
+ operator: Equal
587
+ value: training
588
+ volumes:
589
+ - emptyDir:
590
+ medium: Memory
591
+ sizeLimit: 128Gi
592
+ name: shm
593
+ - name: secrets
594
+ secret:
595
+ secretName: baseten-oracles-secrets
596
+ - name: aws-secrets
597
+ secret:
598
+ secretName: baseten-training-checkpointing-secrets
599
+ - hostPath:
600
+ path: /lib/modules
601
+ type: Directory
602
+ name: host-modules
603
+ - hostPath:
604
+ path: /dev/fuse
605
+ name: fuse
606
+ - emptyDir:
607
+ medium: Memory
608
+ sizeLimit: 1Mi
609
+ name: bt-state
610
+ - emptyDir:
611
+ sizeLimit: 15Gi
612
+ name: runtime-artifacts
613
+ - name: baseten-fs-rw-cache
614
+ persistentVolumeClaim:
615
+ claimName: org-9914a591b6e04ff7848a21ae64fa3398-training-fs-pvc
616
+ - ephemeral:
617
+ volumeClaimTemplate:
618
+ metadata: {}
619
+ spec:
620
+ accessModes:
621
+ - ReadWriteOnce
622
+ resources:
623
+ requests:
624
+ storage: 1000Gi
625
+ name: ckpt-shared-storage
626
+ networkConfig:
627
+ subdomainPolicy: Shared
628
+ replicas: 1
629
+ rolloutStrategy:
630
+ rollingUpdateConfiguration:
631
+ maxSurge: 0
632
+ maxUnavailable: 1
633
+ type: RollingUpdate
634
+ startupPolicy: LeaderReady
635
+ status:
636
+ conditions:
637
+ - lastTransitionTime: "2025-06-17T22:08:13Z"
638
+ message: Replicas are progressing
639
+ reason: GroupsProgressing
640
+ status: "False"
641
+ type: Progressing
642
+ - lastTransitionTime: "2025-06-17T22:08:30Z"
643
+ message: All replicas are ready
644
+ reason: AllGroupsReady
645
+ status: "True"
646
+ type: Available
647
+ hpaPodSelector: leaderworkerset.sigs.k8s.io/name=baseten-training-job-vq0521w-multinode,leaderworkerset.sigs.k8s.io/worker-index=0
648
+ readyReplicas: 1
649
+ replicas: 1
650
+ updatedReplicas: 1
cicd/Dockerfile.jinja ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# CI image for running the axolotl test suite: starts from a prebuilt axolotl
# base image, checks out the ref under test, and installs it with test extras.
FROM axolotlai/axolotl-base:{{ BASE_TAG }}

# Build-time configuration rendered in by the Jinja template (see cicd/tests.py).
ENV TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
ENV AXOLOTL_EXTRAS="{{ AXOLOTL_EXTRAS }}"
ENV AXOLOTL_ARGS="{{ AXOLOTL_ARGS }}"
ENV CUDA="{{ CUDA }}"
ENV PYTORCH_VERSION="{{ PYTORCH_VERSION }}"
ENV GITHUB_REF="{{ GITHUB_REF }}"
ENV GITHUB_SHA="{{ GITHUB_SHA }}"
ENV NIGHTLY_BUILD="{{ NIGHTLY_BUILD }}"
ENV HF_HOME="{{ HF_HOME }}"

RUN apt-get update && \
    apt-get install -y --allow-change-held-packages vim curl nano libnccl2 libnccl-dev

WORKDIR /workspace

RUN git clone --depth=1 https://github.com/axolotl-ai-cloud/axolotl.git

WORKDIR /workspace/axolotl

# Check out the exact ref under test on top of the shallow clone.
RUN git fetch origin +$GITHUB_REF && \
    git checkout FETCH_HEAD

# On nightly builds, point the HF ecosystem pins at their git main branches.
RUN if [ "$NIGHTLY_BUILD" = "true" ] ; then \
        sed -i 's#^transformers.*#transformers @ git+https://github.com/huggingface/transformers.git@main#' requirements.txt; \
        sed -i 's#^peft.*#peft @ git+https://github.com/huggingface/peft.git@main#' requirements.txt; \
        sed -i 's#^accelerate.*#accelerate @ git+https://github.com/huggingface/accelerate.git@main#' requirements.txt; \
        sed -i 's#^trl.*#trl @ git+https://github.com/huggingface/trl.git@main#' requirements.txt; \
        sed -i 's#^datasets.*#datasets @ git+https://github.com/huggingface/datasets.git@main#' requirements.txt; \
    fi

RUN pip install packaging==23.2 setuptools==75.8.0
# If AXOLOTL_EXTRAS is set, append it in brackets
RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
        pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
    else \
        pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray] $AXOLOTL_ARGS; \
    fi

RUN python scripts/unsloth_install.py | sh
RUN python scripts/cutcrossentropy_install.py | sh

# So we can test the Docker image
RUN pip install -r requirements-dev.txt -r requirements-tests.txt

# fix so that git fetch/pull from remote works
RUN git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" && \
    git config --get remote.origin.fetch

# helper for huggingface-login cli
RUN git config --global credential.helper store
cicd/cicd.sh ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Single-GPU CI entrypoint: sanity-check the torch build, then run the test
# suites in stages (some suites must run in isolation from each other).
set -e

# Fail fast if the image ships a torch build other than the one requested.
python -c "import torch; assert '$PYTORCH_VERSION' in torch.__version__"

# Unit tests in parallel (8 workers), excluding the e2e/patched/cli suites.
pytest -v --durations=10 -n8 --ignore=tests/e2e/ --ignore=tests/patched/ --ignore=tests/cli /workspace/axolotl/tests/
pytest -v --durations=10 /workspace/axolotl/tests/e2e/patched/lora_kernels # running these with the other patches causes a failure
pytest -v --durations=10 --ignore=tests/e2e/patched/lora_kernels /workspace/axolotl/tests/e2e/patched
# Solo tests run one at a time (-n1) by design.
pytest -v --durations=10 -n1 /workspace/axolotl/tests/e2e/solo/
pytest -v --durations=10 /workspace/axolotl/tests/e2e/integrations/
pytest -v --durations=10 /workspace/axolotl/tests/cli
# Remaining e2e tests not covered by the staged runs above.
pytest -v --durations=10 --ignore=tests/e2e/solo/ --ignore=tests/e2e/patched/ --ignore=tests/e2e/multigpu/ --ignore=tests/e2e/integrations/ --ignore=tests/cli /workspace/axolotl/tests/e2e/
cicd/multigpu.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
modal application to run axolotl gpu tests in Modal
"""

# pylint: disable=duplicate-code

import os
import pathlib
import tempfile

import jinja2
import modal
from jinja2 import select_autoescape
from modal import App, Image

# Directory containing this file and the Dockerfile.jinja template.
cicd_path = pathlib.Path(__file__).parent.resolve()

template_loader = jinja2.FileSystemLoader(searchpath=cicd_path)
template_env = jinja2.Environment(
    loader=template_loader, autoescape=select_autoescape()
)
df_template = template_env.get_template("Dockerfile.jinja")

# Build arguments rendered into Dockerfile.jinja; CI overrides these via env
# vars. NIGHTLY_BUILD is included explicitly (the template references it, and
# the sibling cicd/tests.py already passes it) rather than relying on Jinja
# rendering an undefined variable as an empty string.
df_args = {
    "AXOLOTL_EXTRAS": os.environ.get("AXOLOTL_EXTRAS", ""),
    "AXOLOTL_ARGS": os.environ.get("AXOLOTL_ARGS", ""),
    "PYTORCH_VERSION": os.environ.get("PYTORCH_VERSION", "2.4.1"),
    "BASE_TAG": os.environ.get("BASE_TAG", "main-base-py3.11-cu121-2.4.1"),
    "CUDA": os.environ.get("CUDA", "121"),
    "GITHUB_REF": os.environ.get("GITHUB_REF", "refs/heads/main"),
    "GITHUB_SHA": os.environ.get("GITHUB_SHA", ""),
    "NIGHTLY_BUILD": os.environ.get("NIGHTLY_BUILD", ""),
    "HF_HOME": "/workspace/data/huggingface-cache/hub",
}

dockerfile_contents = df_template.render(**df_args)

# Write the rendered Dockerfile to a temp dir so Modal can build an image from it.
temp_dir = tempfile.mkdtemp()
with open(pathlib.Path(temp_dir) / "Dockerfile", "w", encoding="utf-8") as f:
    f.write(dockerfile_contents)

cicd_image = Image.from_dockerfile(
    pathlib.Path(temp_dir) / "Dockerfile",
    force_build=True,
    gpu="A10G",
).env(df_args)

app = App("Axolotl CI/CD", secrets=[])

# Shared HF hub cache volume so repeated CI runs reuse downloaded models.
hf_cache_volume = modal.Volume.from_name(
    "axolotl-ci-hf-hub-cache", create_if_missing=True
)
VOLUME_CONFIG = {
    "/workspace/data/huggingface-cache/hub": hf_cache_volume,
}

# Two H100s by default for the multi-GPU test suite.
N_GPUS = int(os.environ.get("N_GPUS", 2))
GPU_CONFIG = modal.gpu.H100(count=N_GPUS)
+
59
+
60
+ def run_cmd(cmd: str, run_folder: str):
61
+ import subprocess # nosec
62
+
63
+ # Propagate errors from subprocess.
64
+ if exit_code := subprocess.call(cmd.split(), cwd=run_folder): # nosec
65
+ exit(exit_code) # pylint: disable=consider-using-sys-exit
66
+
67
+
68
+ @app.function(
69
+ image=cicd_image,
70
+ gpu=GPU_CONFIG,
71
+ timeout=60 * 60,
72
+ cpu=8.0,
73
+ memory=131072 * N_GPUS,
74
+ volumes=VOLUME_CONFIG,
75
+ )
76
+ def cicd_pytest():
77
+ run_cmd("./cicd/multigpu.sh", "/workspace/axolotl")
78
+
79
+
80
+ @app.local_entrypoint()
81
+ def main():
82
+ cicd_pytest.remote()
cicd/multigpu.sh ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
#!/bin/bash
# Multi-GPU CI entrypoint: runs only the multigpu e2e suite.
set -e

# cap parallelism at two pytest workers so as not to OOM the GPUs
pytest -v -n2 /workspace/axolotl/tests/e2e/multigpu/
cicd/tests.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Modal app to run axolotl GPU tests"""

# pylint: disable=duplicate-code

import os
import pathlib
import tempfile

import jinja2
import modal
from jinja2 import select_autoescape
from modal import App, Image

# Directory containing this file and the Dockerfile.jinja template.
cicd_path = pathlib.Path(__file__).parent.resolve()

template_loader = jinja2.FileSystemLoader(searchpath=cicd_path)
template_env = jinja2.Environment(
    loader=template_loader, autoescape=select_autoescape()
)
df_template = template_env.get_template("Dockerfile.jinja")

# Build arguments rendered into Dockerfile.jinja; CI overrides these via env vars.
df_args = {
    "AXOLOTL_EXTRAS": os.environ.get("AXOLOTL_EXTRAS", ""),
    "AXOLOTL_ARGS": os.environ.get("AXOLOTL_ARGS", ""),
    "PYTORCH_VERSION": os.environ.get("PYTORCH_VERSION", "2.4.1"),
    "BASE_TAG": os.environ.get("BASE_TAG", "main-base-py3.11-cu121-2.4.1"),
    "CUDA": os.environ.get("CUDA", "121"),
    "GITHUB_REF": os.environ.get("GITHUB_REF", "refs/heads/main"),
    "GITHUB_SHA": os.environ.get("GITHUB_SHA", ""),
    "NIGHTLY_BUILD": os.environ.get("NIGHTLY_BUILD", ""),
    "HF_HOME": "/workspace/data/huggingface-cache/hub",
}

dockerfile_contents = df_template.render(**df_args)

# Write the rendered Dockerfile to a temp dir so Modal can build an image from it.
temp_dir = tempfile.mkdtemp()
with open(pathlib.Path(temp_dir) / "Dockerfile", "w", encoding="utf-8") as f:
    f.write(dockerfile_contents)

cicd_image = Image.from_dockerfile(
    pathlib.Path(temp_dir) / "Dockerfile",
    # NOTE(review): context_mount was removed in recent Modal releases —
    # confirm the pinned modal version still accepts this keyword.
    context_mount=None,
    force_build=True,
    gpu="A10G",
).env(df_args)

app = App("Axolotl CI/CD", secrets=[])

# Shared HF hub cache volume so repeated CI runs reuse downloaded models.
hf_cache_volume = modal.Volume.from_name(
    "axolotl-ci-hf-hub-cache", create_if_missing=True
)
VOLUME_CONFIG = {
    "/workspace/data/huggingface-cache/hub": hf_cache_volume,
}

# Single L40S by default for the single-GPU test suite.
N_GPUS = int(os.environ.get("N_GPUS", 1))
GPU_CONFIG = modal.gpu.L40S(count=N_GPUS)
58
+
59
+
60
def run_cmd(cmd: str, run_folder: str):
    """Run ``cmd`` in ``run_folder``, terminating the process on failure.

    Args:
        cmd: Command line to execute. Split with ``shlex.split`` so quoted
            arguments survive (plain ``str.split`` would break them apart).
        run_folder: Working directory for the subprocess.

    Raises:
        SystemExit: with the subprocess's exit code when it is non-zero.
    """
    import shlex
    import subprocess  # nosec
    import sys

    # Propagate errors from subprocess. sys.exit is used instead of the
    # site-module exit() helper, which is not guaranteed to exist.
    if exit_code := subprocess.call(shlex.split(cmd), cwd=run_folder):  # nosec
        sys.exit(exit_code)
66
+
67
+
68
@app.function(
    image=cicd_image,
    gpu=GPU_CONFIG,
    timeout=60 * 60,  # one hour budget for the full suite
    cpu=8.0,
    memory=131072,
    volumes=VOLUME_CONFIG,
)
def cicd_pytest():
    # Run the staged single-GPU test script inside the Modal container,
    # from the axolotl checkout baked into the image.
    run_cmd("./cicd/cicd.sh", "/workspace/axolotl")


@app.local_entrypoint()
def main():
    # Local entrypoint: kick off the remote test run and block until it finishes.
    cicd_pytest.remote()
config.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Baseten training-job definition for the Gemma-3 LoRA fine-tune demo."""

from truss.base import truss_config
from truss_train import definitions

# Runtime provides runtime options for the training job. See the docs to learn
# more about configuring Training Cache and Automatic Checkpointing.
runtime = definitions.Runtime(
    start_commands=[
        "/bin/sh -c './run.sh'",
    ],
    environment_variables={
        # Make sure these secrets are set in your Baseten Workspace
        "HF_TOKEN": definitions.SecretReference(name="hf_access_token"),
        "WANDB_API_KEY": definitions.SecretReference(name="wandb_api_key"),
        "BASE_MODEL_ID": "google/gemma-3-27b-it",
        "OUTPUT_LORA_REPO_ID": "rayraycano/finetune-demo-lora",  # TODO: your HF Repo ID
    },
    enable_cache=True,
    checkpointing_config=definitions.CheckpointingConfig(
        enabled=False,
    ),
)

# Compute specifies the hardware required for the training job. See the docs
# to learn more about configuring multinode training.
compute = definitions.Compute(
    accelerator=truss_config.AcceleratorSpec(
        accelerator=truss_config.Accelerator.H100,
        count=8,
    ),
    # node_count=2,
)

# TrainingJob is the main configuration object for the training job; it ties
# together the compute, runtime, and image.
training_job = definitions.TrainingJob(
    compute=compute,
    runtime=runtime,
    # axolotl image includes most of the dependencies you need for training
    image=definitions.Image(
        base_image="axolotlai/axolotl:main-20250324-py3.11-cu124-2.6.0"
    ),
)

# TrainingProject is an organizational tool to group training jobs.
first_project = definitions.TrainingProject(
    name="finetune-demo-full-feature-cru-us-east1-prod-1", job=training_job
)
deepspeed_config.yml ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Hugging Face Accelerate launcher config: single-machine DeepSpeed ZeRO
# stage-3 training in bf16 mixed precision, with no CPU offload.
compute_environment: LOCAL_MACHINE
debug: false
deepspeed_config:
  deepspeed_multinode_launcher: standard
  offload_optimizer_device: none  # keep optimizer state on-device (no offload)
  offload_param_device: none      # keep parameters on-device (no offload)
  zero3_init_flag: true           # construct the model directly in ZeRO-3 shards
  zero3_save_16bit_model: true    # gather a 16-bit copy of weights on save
  zero_stage: 3
distributed_type: DEEPSPEED
downcast_bf16: 'no'
machine_rank: 0
main_training_function: main
mixed_precision: bf16
num_machines: 1
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
deepspeed_configs/zero1.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "zero_optimization": {
3
+ "stage": 1,
4
+ "overlap_comm": true
5
+ },
6
+ "bf16": {
7
+ "enabled": "auto"
8
+ },
9
+ "fp16": {
10
+ "enabled": "auto",
11
+ "auto_cast": false,
12
+ "loss_scale": 0,
13
+ "initial_scale_power": 32,
14
+ "loss_scale_window": 1000,
15
+ "hysteresis": 2,
16
+ "min_loss_scale": 1
17
+ },
18
+ "gradient_accumulation_steps": "auto",
19
+ "gradient_clipping": "auto",
20
+ "train_batch_size": "auto",
21
+ "train_micro_batch_size_per_gpu": "auto",
22
+ "wall_clock_breakdown": false
23
+ }
deepspeed_configs/zero1_torch_compile.json ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "zero_optimization": {
3
+ "stage": 1,
4
+ "overlap_comm": true
5
+ },
6
+ "bf16": {
7
+ "enabled": "auto"
8
+ },
9
+ "fp16": {
10
+ "enabled": "auto",
11
+ "auto_cast": false,
12
+ "loss_scale": 0,
13
+ "initial_scale_power": 32,
14
+ "loss_scale_window": 1000,
15
+ "hysteresis": 2,
16
+ "min_loss_scale": 1
17
+ },
18
+ "compile": {
19
+ "disable": false,
20
+ "backend": "inductor"
21
+ },
22
+ "gradient_accumulation_steps": "auto",
23
+ "gradient_clipping": "auto",
24
+ "train_batch_size": "auto",
25
+ "train_micro_batch_size_per_gpu": "auto",
26
+ "wall_clock_breakdown": false
27
+ }
deepspeed_configs/zero2.json ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "zero_optimization": {
3
+ "stage": 2,
4
+ "offload_optimizer": {
5
+ "device": "cpu"
6
+ },
7
+ "contiguous_gradients": true,
8
+ "overlap_comm": true
9
+ },
10
+ "bf16": {
11
+ "enabled": "auto"
12
+ },
13
+ "fp16": {
14
+ "enabled": "auto",
15
+ "auto_cast": false,
16
+ "loss_scale": 0,
17
+ "initial_scale_power": 32,
18
+ "loss_scale_window": 1000,
19
+ "hysteresis": 2,
20
+ "min_loss_scale": 1
21
+ },
22
+ "gradient_accumulation_steps": "auto",
23
+ "gradient_clipping": "auto",
24
+ "train_batch_size": "auto",
25
+ "train_micro_batch_size_per_gpu": "auto",
26
+ "wall_clock_breakdown": false
27
+ }
deepspeed_configs/zero3.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "zero_optimization": {
3
+ "stage": 3,
4
+ "overlap_comm": true,
5
+ "contiguous_gradients": true,
6
+ "sub_group_size": 0,
7
+ "reduce_bucket_size": "auto",
8
+ "stage3_prefetch_bucket_size": "auto",
9
+ "stage3_param_persistence_threshold": "auto",
10
+ "stage3_max_live_parameters": 0,
11
+ "stage3_max_reuse_distance": 0,
12
+ "stage3_gather_16bit_weights_on_model_save": true
13
+ },
14
+ "bf16": {
15
+ "enabled": "auto"
16
+ },
17
+ "fp16": {
18
+ "enabled": "auto",
19
+ "auto_cast": false,
20
+ "loss_scale": 0,
21
+ "initial_scale_power": 32,
22
+ "loss_scale_window": 1000,
23
+ "hysteresis": 2,
24
+ "min_loss_scale": 1
25
+ },
26
+ "gradient_accumulation_steps": "auto",
27
+ "gradient_clipping": "auto",
28
+ "train_batch_size": "auto",
29
+ "train_micro_batch_size_per_gpu": "auto",
30
+ "wall_clock_breakdown": false
31
+ }
deepspeed_configs/zero3_bf16.json ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "zero_optimization": {
3
+ "stage": 3,
4
+ "overlap_comm": true,
5
+ "contiguous_gradients": true,
6
+ "sub_group_size": 0,
7
+ "reduce_bucket_size": "auto",
8
+ "stage3_prefetch_bucket_size": "auto",
9
+ "stage3_param_persistence_threshold": "auto",
10
+ "stage3_max_live_parameters": 0,
11
+ "stage3_max_reuse_distance": 0,
12
+ "stage3_gather_16bit_weights_on_model_save": true
13
+ },
14
+ "bf16": {
15
+ "enabled": true
16
+ },
17
+ "gradient_accumulation_steps": "auto",
18
+ "gradient_clipping": "auto",
19
+ "train_batch_size": "auto",
20
+ "train_micro_batch_size_per_gpu": "auto",
21
+ "wall_clock_breakdown": false
22
+ }