Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .gitattributes +3 -0
- ACE_plus/.github/workflows/publish.yml +28 -0
- ACE_plus/examples/__init__.py +0 -0
- ACE_plus/examples/examples.py +233 -0
- ACE_plus/examples/exp_example/20250704031317/checkpoints/ldm_step-100/README.md +10 -0
- ACE_plus/examples/exp_example/20250704031317/checkpoints/ldm_step-100/configuration.json +1 -0
- ACE_plus/examples/exp_example/20250704031317/noise_schedule.png +0 -0
- ACE_plus/examples/exp_example/20250704031317/sampler_schedule.png +0 -0
- ACE_plus/examples/exp_example/20250704031317/std_log.txt +581 -0
- ACE_plus/examples/exp_example/20250704031317/train.yaml +288 -0
- ACE_plus/flashenv/bin/Activate.ps1 +247 -0
- ACE_plus/flashenv/bin/activate +69 -0
- ACE_plus/flashenv/bin/activate.csh +26 -0
- ACE_plus/flashenv/bin/activate.fish +69 -0
- ACE_plus/flashenv/bin/pip +8 -0
- ACE_plus/flashenv/bin/pip3 +8 -0
- ACE_plus/flashenv/bin/pip3.10 +8 -0
- ACE_plus/flashenv/bin/python +0 -0
- ACE_plus/flashenv/bin/python3 +0 -0
- ACE_plus/flashenv/bin/python3.10 +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/pkg_resources/__init__.py +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-310.pyc +3 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/__init__.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/__main__.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_cell_widths.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_emoji_codes.cpython-310.pyc +3 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_emoji_replace.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_export_format.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_extension.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_fileno.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_inspect.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_log_render.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_loop.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_null_file.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_palettes.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_pick.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_ratio.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_spinners.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_stack.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_timer.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_win32_console.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_windows.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_windows_renderer.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_wrap.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/abc.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/align.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/ansi.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/bar.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/box.cpython-310.pyc +0 -0
- ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/cells.cpython-310.pyc +0 -0
.gitattributes
CHANGED
@@ -111,3 +111,6 @@ ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/distlib/w64-arm.exe f
 ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/distlib/t64-arm.exe filter=lfs diff=lfs merge=lfs -text
 ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/distlib/t64.exe filter=lfs diff=lfs merge=lfs -text
 ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/distlib/w64.exe filter=lfs diff=lfs merge=lfs -text
+ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_emoji_codes.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+ACE_plus/flashenv/lib/python3.10/site-packages/pkg_resources/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
ACE_plus/.github/workflows/publish.yml
ADDED
@@ -0,0 +1,28 @@
name: Publish to Comfy registry
on:
  workflow_dispatch:
  push:
    branches:
      - main
      - master
    paths:
      - "pyproject.toml"

permissions:
  issues: write

jobs:
  publish-node:
    name: Publish Custom Node to registry
    runs-on: ubuntu-latest
    if: ${{ github.repository_owner == 'ali-vilab' }}
    steps:
      - name: Check out code
        uses: actions/checkout@v4
        with:
          submodules: true
      - name: Publish Custom Node
        uses: Comfy-Org/publish-node-action@v1
        with:
          ## Add your own personal access token to your Github Repository secrets and reference it here.
          personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}

ACE_plus/examples/__init__.py
ADDED
File without changes
ACE_plus/examples/examples.py
ADDED
@@ -0,0 +1,233 @@
all_examples = [
    {
        "input_image": None,
        "input_mask": None,
        "input_reference_image": "assets/samples/portrait/human_1.jpg",
        "save_path": "examples/outputs/portrait_human_1.jpg",
        "instruction": "Maintain the facial features, A girl is wearing a neat police uniform and sporting a badge. She is smiling with a friendly and confident demeanor. The background is blurred, featuring a cartoon logo.",
        "output_h": 1024,
        "output_w": 1024,
        "seed": 4194866942,
        "repainting_scale": 1.0,
        "task_type": "portrait",
        "edit_type": "repainting"
    },
    {
        "input_image": None,
        "input_mask": None,
        "input_reference_image": "assets/samples/subject/subject_1.jpg",
        "save_path": "examples/outputs/subject_subject_1.jpg",
        "instruction": "Display the logo in a minimalist style printed in white on a matte black ceramic coffee mug, alongside a steaming cup of coffee on a cozy cafe table.",
        "output_h": 1024,
        "output_w": 1024,
        "seed": 2935362780,
        "repainting_scale": 1.0,
        "task_type": "subject",
        "edit_type": "repainting"
    },
    {
        "input_image": "assets/samples/local/local_1.webp",
        "input_mask": "assets/samples/local/local_1_m.webp",
        "input_reference_image": None,
        "save_path": "examples/outputs/local_local_1.jpg",
        "instruction": "By referencing the mask, restore a partial image from the doodle {image} that aligns with the textual explanation: \"1 white old owl\".",
        "output_h": -1,
        "output_w": -1,
        "seed": 1159797084,
        "repainting_scale": 0.5,
        "task_type": "local_editing",
        "edit_type": "contour_repainting"
    },
    {
        "input_image": "assets/samples/application/photo_editing/1_1_edit.png",
        "input_mask": "assets/samples/application/photo_editing/1_1_m.png",
        "input_reference_image": "assets/samples/application/photo_editing/1_ref.png",
        "save_path": "examples/outputs/photo_editing_1.jpg",
        "instruction": "The item is put on the ground.",
        "output_h": -1,
        "output_w": -1,
        "seed": 2072028954,
        "repainting_scale": 1.0,
        "task_type": "subject",
        "edit_type": "repainting"
    },
    {
        "input_image": "assets/samples/application/logo_paste/1_1_edit.png",
        "input_mask": "assets/samples/application/logo_paste/1_1_m.png",
        "input_reference_image": "assets/samples/application/logo_paste/1_ref.png",
        "save_path": "examples/outputs/logo_paste_1.jpg",
        "instruction": "The logo is printed on the headphones.",
        "output_h": -1,
        "output_w": -1,
        "seed": 934582264,
        "repainting_scale": 1.0,
        "task_type": "subject",
        "edit_type": "repainting"
    },
    {
        "input_image": "assets/samples/application/movie_poster/1_1_edit.png",
        "input_mask": "assets/samples/application/movie_poster/1_1_m.png",
        "input_reference_image": "assets/samples/application/movie_poster/1_ref.png",
        "save_path": "examples/outputs/movie_poster_1.jpg",
        "instruction": "The man is facing the camera and is smiling.",
        "output_h": -1,
        "output_w": -1,
        "seed": 988183236,
        "repainting_scale": 1.0,
        "task_type": "portrait",
        "edit_type": "repainting"
    }

]

fft_examples = [
    {
        "input_image": None,
        "input_mask": None,
        "input_reference_image": "./assets/samples/portrait/human_1.jpg",
        "save_path": "examples/outputs/portrait_human_1.jpg",
        "instruction": "Maintain the facial features, A girl is wearing a neat police uniform and sporting a badge. She is smiling with a friendly and confident demeanor. The background is blurred, featuring a cartoon logo.",
        "output_h": 1024,
        "output_w": 1024,
        "seed": 10000000,
        "repainting_scale": 1.0,
        "edit_type": "repainting"
    },
    {
        "input_image": None,
        "input_mask": None,
        "input_reference_image": "./assets/samples/subject/subject_1.jpg",
        "save_path": "examples/outputs/subject_subject_1.jpg",
        "instruction": "Display the logo in a minimalist style printed in white on a matte black ceramic coffee mug, alongside a steaming cup of coffee on a cozy cafe table.",
        "output_h": 1024,
        "output_w": 1024,
        "seed": 10000000,
        "repainting_scale": 1.0,
        "edit_type": "repainting"
    },
    {
        "input_image": "./assets/samples/application/photo_editing/1_2_edit.jpg",
        "input_mask": "./assets/samples/application/photo_editing/1_2_m.webp",
        "input_reference_image": "./assets/samples/application/photo_editing/1_ref.png",
        "save_path": "examples/outputs/photo_editing_1.jpg",
        "instruction": "The item is put on the table.",
        "output_h": 1024,
        "output_w": 1024,
        "seed": 8006019,
        "repainting_scale": 1.0,
        "edit_type": "repainting"
    },
    {
        "input_image": "./assets/samples/application/logo_paste/1_1_edit.png",
        "input_mask": "./assets/samples/application/logo_paste/1_1_m.png",
        "input_reference_image": "assets/samples/application/logo_paste/1_ref.png",
        "save_path": "examples/outputs/logo_paste_1.jpg",
        "instruction": "The logo is printed on the headphones.",
        "output_h": 1024,
        "output_w": 1024,
        "seed": 934582264,
        "repainting_scale": 1.0,
        "edit_type": "repainting"
    },
    {
        "input_image": "./assets/samples/application/try_on/1_1_edit.png",
        "input_mask": "./assets/samples/application/try_on/1_1_m.png",
        "input_reference_image": "assets/samples/application/try_on/1_ref.png",
        "save_path": "examples/outputs/try_on_1.jpg",
        "instruction": "The woman dresses this skirt.",
        "output_h": 1024,
        "output_w": 1024,
        "seed": 934582264,
        "repainting_scale": 1.0,
        "edit_type": "repainting"
    },
    {
        "input_image": "./assets/samples/portrait/human_1.jpg",
        "input_mask": "assets/samples/application/movie_poster/1_2_m.webp",
        "input_reference_image": "assets/samples/application/movie_poster/1_ref.png",
        "save_path": "examples/outputs/movie_poster_1.jpg",
        "instruction": "{image}, the man faces the camera.",
        "output_h": 1024,
        "output_w": 1024,
        "seed": 3999647,
        "repainting_scale": 1.0,
        "edit_type": "repainting"
    },
    {
        "input_image": "./assets/samples/application/sr/sr_tiger.png",
        "input_mask": "./assets/samples/application/sr/sr_tiger_m.webp",
        "input_reference_image": None,
        "save_path": "examples/outputs/mario_recolorizing_1.jpg",
        "instruction": "{image} features a close-up of a young, furry tiger cub on a rock. The tiger, which appears to be quite young, has distinctive orange, "
                       "black, and white striped fur, typical of tigers. The cub's eyes have a bright and curious expression, and its ears are perked up, "
                       "indicating alertness. The cub seems to be in the act of climbing or resting on the rock. The background is a blurred grassland with trees, "
                       "but the focus is on the cub, which is vividly colored while the rest of the image is in grayscale, drawing attention to the tiger's details."
                       " The photo captures a moment in the wild, depicting the charming and tenacious nature of this young tiger,"
                       " as well as its typical interaction with the environment.",
        "output_h": 1024,
        "output_w": 1024,
        "seed": 199999,
        "repainting_scale": 0.0,
        "edit_type": "no_preprocess"
    },
    {
        "input_image": "./assets/samples/application/photo_editing/1_ref.png",
        "input_mask": "./assets/samples/application/photo_editing/1_1_orm.webp",
        "input_reference_image": None,
        "save_path": "examples/outputs/mario_repainting_1.jpg",
        "instruction": "a blue hand",
        "output_h": 1024,
        "output_w": 1024,
        "seed": 63401,
        "repainting_scale": 1.0,
        "edit_type": "repainting"
    },
    {
        "input_image": "./assets/samples/application/photo_editing/1_ref.png",
        "input_mask": "./assets/samples/application/photo_editing/1_1_rm.webp",
        "input_reference_image": None,
        "save_path": "examples/outputs/mario_repainting_2.jpg",
        "instruction": "Mechanical hands like a robot",
        "output_h": 1024,
        "output_w": 1024,
        "seed": 59107,
        "repainting_scale": 1.0,
        "edit_type": "repainting"
    },
    {
        "input_image": "./assets/samples/control/1_1.webp",
        "input_mask": "./assets/samples/control/1_1_m.webp",
        "input_reference_image": None,
        "save_path": "examples/outputs/control_recolorizing.jpg",
        "instruction": "{image} Beautiful female portrait, Robot with smooth White transparent carbon shell, rococo detailing, Natural lighting, Highly detailed, Cinematic, 4K.",
        "output_h": 1024,
        "output_w": 1024,
        "seed": 9652101,
        "repainting_scale": 0.0,
        "edit_type": "recolorizing"
    },
    {
        "input_image": "./assets/samples/control/1_1.webp",
        "input_mask": "./assets/samples/control/1_1_m.webp",
        "input_reference_image": None,
        "save_path": "examples/outputs/control_depth.jpg",
        "instruction": "{image} Beautiful female portrait, Robot with smooth White transparent carbon shell, rococo detailing, Natural lighting, Highly detailed, Cinematic, 4K.",
        "output_h": 1024,
        "output_w": 1024,
        "seed": 14979476,
        "repainting_scale": 0.0,
        "edit_type": "depth_repainting"
    },
    {
        "input_image": "./assets/samples/control/1_1.webp",
        "input_mask": "./assets/samples/control/1_1_m.webp",
        "input_reference_image": None,
        "save_path": "examples/outputs/control_contour.jpg",
        "instruction": "{image} Beautiful female portrait, Robot with smooth White transparent carbon shell, rococo detailing, Natural lighting, Highly detailed, Cinematic, 4K.",
        "output_h": 1024,
        "output_w": 1024,
        "seed": 4227292472,
        "repainting_scale": 0.0,
        "edit_type": "contour_repainting"
    }
]
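Every entry in the two lists above follows one flat schema: optional `input_image` / `input_mask` / `input_reference_image` paths, a `save_path`, a text `instruction` (with `{image}` as a placeholder for the input), `output_h` / `output_w` (where `-1` appears to mean "derive from the input"), a `seed`, a `repainting_scale` in `[0, 1]`, and an `edit_type` (plus a `task_type` in `all_examples`). Below is a minimal sketch of how a consumer might iterate these entries; `run_inference` is a hypothetical stand-in for the repo's actual pipeline entry point, and the import path assumes the script runs from the ACE_plus root.

```python
import os

from examples.examples import all_examples  # assumes cwd is the ACE_plus root


def missing_assets(example: dict) -> list[str]:
    """Return the entry's asset paths that are missing on disk."""
    keys = ("input_image", "input_mask", "input_reference_image")
    return [p for k in keys if (p := example[k]) is not None and not os.path.exists(p)]


for example in all_examples:
    missing = missing_assets(example)
    if missing:
        print(f"skipping {example['save_path']}: missing {missing}")
        continue
    # run_inference(**example)  # hypothetical call into the ACE++ pipeline
```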
ACE_plus/examples/exp_example/20250704031317/checkpoints/ldm_step-100/README.md
ADDED
@@ -0,0 +1,10 @@
## Training procedure

### Framework versions


- SWIFT 3.4.0
### Base model information


- BaseModel Class LatentDiffusionACEPlus

ACE_plus/examples/exp_example/20250704031317/checkpoints/ldm_step-100/configuration.json
ADDED
@@ -0,0 +1 @@
{}

ACE_plus/examples/exp_example/20250704031317/noise_schedule.png
ADDED

ACE_plus/examples/exp_example/20250704031317/sampler_schedule.png
ADDED
ACE_plus/examples/exp_example/20250704031317/std_log.txt
ADDED
@@ -0,0 +1,581 @@
scepter [INFO] 2025-07-04 03:13:19,003 [File: logger.py Function: init_logger at line 85] Running task with log file: /home/Ubuntu/Downloads/Unmodel/ACE_plus/./examples/exp_example/20250704031317/std_log.txt
scepter [WARNING] 2025-07-04 03:13:19,094 [File: import_utils.py Function: import_module at line 325] ('DATASETS', 'ACEPlusDataset') not found in ast index file
scepter [INFO] 2025-07-04 03:13:19,095 [File: ace_plus_dataset.py Function: read_data_list at line 151] subject has 12 samples.
scepter [INFO] 2025-07-04 03:13:19,096 [File: registry.py Function: __init__ at line 185] Built dataloader with len 9223372036854775807
scepter [WARNING] 2025-07-04 03:13:19,096 [File: import_utils.py Function: import_module at line 325] ('DATASETS', 'ACEPlusDataset') not found in ast index file
scepter [INFO] 2025-07-04 03:13:19,096 [File: ace_plus_dataset.py Function: read_data_list at line 151] subject has 12 samples.
scepter [INFO] 2025-07-04 03:13:19,096 [File: registry.py Function: __init__ at line 185] Built dataloader with len 12
scepter [INFO] 2025-07-04 03:14:34,962 [File: flux.py Function: load_pretrained_model at line 450] Restored from /home/Ubuntu/Downloads/Unmodel/Reference_models/flux1-fill-dev.safetensors with 0 missing and 0 unexpected keys
scepter [INFO] 2025-07-04 03:14:34,987 [File: ace_plus_ldm.py Function: construct_network at line 62] all parameters:11.90B
scepter [INFO] 2025-07-04 03:14:35,816 [File: ae_module.py Function: construct_model at line 76] AE Module XFORMERS_IS_AVAILBLE : True
scepter [INFO] 2025-07-04 03:14:36,509 [File: ae_kl.py Function: init_from_ckpt at line 400] Restored from /home/Ubuntu/Downloads/Unmodel/Reference_models/ae.safetensors with 0 missing and 0 unexpected keys
scepter [INFO] 2025-07-04 03:14:45,062 [File: diffusion_solver.py Function: add_tuner at line 788] [('base_model.model.double_blocks.0.img_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.0.img_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.0.img_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.0.img_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.0.img_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.0.img_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.0.img_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.0.img_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.0.img_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.0.img_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.0.txt_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.0.txt_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.0.txt_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.0.txt_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.0.txt_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.0.txt_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.0.txt_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.0.txt_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.0.txt_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.0.txt_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.1.img_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.1.img_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.1.img_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.1.img_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.1.img_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.1.img_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.1.img_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.1.img_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.1.img_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.1.img_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.1.txt_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.1.txt_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.1.txt_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.1.txt_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), 
('base_model.model.double_blocks.1.txt_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.1.txt_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.1.txt_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.1.txt_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.1.txt_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.1.txt_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.2.img_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.2.img_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.2.img_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.2.img_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.2.img_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.2.img_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.2.img_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.2.img_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.2.img_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.2.img_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.2.txt_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.2.txt_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.2.txt_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.2.txt_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.2.txt_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.2.txt_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.2.txt_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.2.txt_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.2.txt_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.2.txt_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.3.img_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.3.img_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.3.img_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.3.img_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.3.img_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.3.img_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.3.img_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.3.img_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.3.img_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), 
('base_model.model.double_blocks.3.img_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.3.txt_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.3.txt_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.3.txt_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.3.txt_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.3.txt_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.3.txt_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.3.txt_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.3.txt_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.3.txt_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.3.txt_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.4.img_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.4.img_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.4.img_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.4.img_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.4.img_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.4.img_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.4.img_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.4.img_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.4.img_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.4.img_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.4.txt_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.4.txt_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.4.txt_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.4.txt_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.4.txt_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.4.txt_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.4.txt_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.4.txt_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.4.txt_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.4.txt_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.5.img_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.5.img_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.5.img_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.5.img_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), 
('base_model.model.double_blocks.5.img_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.5.img_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.5.img_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.5.img_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.5.img_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.5.img_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.5.txt_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.5.txt_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.5.txt_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.5.txt_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.5.txt_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.5.txt_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.5.txt_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.5.txt_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.5.txt_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.5.txt_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.6.img_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.6.img_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.6.img_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.6.img_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.6.img_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.6.img_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.6.img_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.6.img_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.6.img_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.6.img_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.6.txt_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.6.txt_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.6.txt_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.6.txt_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.6.txt_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.6.txt_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.6.txt_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.6.txt_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.6.txt_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), 
('base_model.model.double_blocks.6.txt_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.7.img_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.7.img_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.7.img_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.7.img_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.7.img_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.7.img_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.7.img_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.7.img_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.7.img_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.7.img_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.7.txt_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.7.txt_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.7.txt_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.7.txt_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.7.txt_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.7.txt_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.7.txt_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.7.txt_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.7.txt_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.7.txt_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.8.img_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.8.img_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.8.img_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.8.img_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.8.img_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.8.img_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.8.img_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.8.img_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.8.img_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.8.img_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.8.txt_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.8.txt_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.8.txt_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.8.txt_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), 
('base_model.model.double_blocks.8.txt_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.8.txt_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.8.txt_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.8.txt_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.8.txt_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.8.txt_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.9.img_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.9.img_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.9.img_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.9.img_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.9.img_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.9.img_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.9.img_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.9.img_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.9.img_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.9.img_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.9.txt_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.9.txt_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.9.txt_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.9.txt_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.9.txt_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.9.txt_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.9.txt_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.9.txt_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.9.txt_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.9.txt_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.10.img_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.10.img_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.10.img_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.10.img_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.10.img_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.10.img_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.10.img_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.10.img_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.10.img_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), 
('base_model.model.double_blocks.10.img_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.10.txt_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.10.txt_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.10.txt_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.10.txt_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.10.txt_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.10.txt_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.10.txt_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.10.txt_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.10.txt_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.10.txt_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.11.img_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.11.img_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.11.img_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.11.img_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.11.img_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.11.img_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.11.img_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.11.img_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.11.img_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.11.img_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.11.txt_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.11.txt_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.11.txt_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.11.txt_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.11.txt_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.11.txt_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.11.txt_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.11.txt_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.11.txt_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.11.txt_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.12.img_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.12.img_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.12.img_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.12.img_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), 
('base_model.model.double_blocks.12.img_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.12.img_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.12.img_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.12.img_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.12.img_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.12.img_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.12.txt_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.12.txt_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.12.txt_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.12.txt_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.12.txt_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.12.txt_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.12.txt_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.12.txt_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.12.txt_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.12.txt_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.13.img_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.13.img_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.13.img_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.13.img_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.13.img_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.13.img_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.13.img_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.13.img_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.13.img_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.13.img_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.13.txt_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.13.txt_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.13.txt_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.13.txt_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.13.txt_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.13.txt_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.13.txt_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.13.txt_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.13.txt_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), 
('base_model.model.double_blocks.13.txt_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.14.img_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.14.img_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.14.img_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.14.img_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.14.img_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.14.img_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.14.img_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.14.img_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.14.img_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.14.img_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.14.txt_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.14.txt_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.14.txt_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.14.txt_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.14.txt_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.14.txt_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.14.txt_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.14.txt_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.14.txt_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.14.txt_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.15.img_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.15.img_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.15.img_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.15.img_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.15.img_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.15.img_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.15.img_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.15.img_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.15.img_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.15.img_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.15.txt_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.15.txt_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.15.txt_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.15.txt_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), 
('base_model.model.double_blocks.15.txt_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.15.txt_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.15.txt_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.15.txt_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.15.txt_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.15.txt_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.16.img_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.16.img_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.16.img_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.16.img_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.16.img_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.16.img_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.16.img_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.16.img_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.16.img_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.16.img_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.16.txt_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.16.txt_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.16.txt_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.16.txt_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.16.txt_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.16.txt_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.16.txt_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.16.txt_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.16.txt_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.16.txt_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.17.img_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.17.img_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.17.img_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.17.img_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.17.img_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.17.img_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.17.img_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.17.img_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.17.img_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), 
('base_model.model.double_blocks.17.img_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.17.txt_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.17.txt_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.17.txt_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.17.txt_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.17.txt_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.17.txt_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.17.txt_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.17.txt_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.17.txt_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.17.txt_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.18.img_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.18.img_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.18.img_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.18.img_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.18.img_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.18.img_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.18.img_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.18.img_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.18.img_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.18.img_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.18.txt_mod.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.18.txt_mod.lin.lora_B.0_SwiftLoRA.weight', torch.Size([18432, 64])), ('base_model.model.double_blocks.18.txt_attn.qkv.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.18.txt_attn.qkv.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.double_blocks.18.txt_attn.proj.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.18.txt_attn.proj.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.double_blocks.18.txt_mlp.0.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.double_blocks.18.txt_mlp.0.lora_B.0_SwiftLoRA.weight', torch.Size([12288, 64])), ('base_model.model.double_blocks.18.txt_mlp.2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 12288])), ('base_model.model.double_blocks.18.txt_mlp.2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.0.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.0.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.0.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.0.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), 
('base_model.model.single_blocks.0.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.0.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.1.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.1.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.1.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.1.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.1.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.1.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.2.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.2.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.2.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.2.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.2.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.2.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.3.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.3.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.3.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.3.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.3.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.3.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.4.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.4.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.4.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.4.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.4.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.4.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.5.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.5.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.5.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.5.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.5.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.5.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.6.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.6.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.6.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.6.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), 
('base_model.model.single_blocks.6.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.6.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.7.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.7.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.7.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.7.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.7.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.7.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.8.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.8.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.8.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.8.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.8.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.8.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.9.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.9.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.9.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.9.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.9.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.9.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.10.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.10.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.10.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.10.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.10.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.10.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.11.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.11.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.11.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.11.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.11.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.11.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.12.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.12.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.12.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.12.linear2.lora_B.0_SwiftLoRA.weight', 
torch.Size([3072, 64])), ('base_model.model.single_blocks.12.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.12.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.13.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.13.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.13.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.13.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.13.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.13.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.14.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.14.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.14.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.14.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.14.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.14.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.15.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.15.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.15.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.15.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.15.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.15.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.16.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.16.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.16.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.16.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.16.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.16.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.17.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.17.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.17.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.17.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.17.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.17.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.18.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.18.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.18.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), 
('base_model.model.single_blocks.18.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.18.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.18.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.19.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.19.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.19.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.19.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.19.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.19.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.20.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.20.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.20.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.20.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.20.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.20.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.21.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.21.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.21.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.21.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.21.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.21.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.22.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.22.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.22.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.22.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.22.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.22.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.23.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.23.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.23.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.23.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.23.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.23.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.24.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.24.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), 
('base_model.model.single_blocks.24.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.24.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.24.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.24.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.25.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.25.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.25.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.25.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.25.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.25.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.26.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.26.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.26.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.26.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.26.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.26.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.27.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.27.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.27.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.27.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.27.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.27.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.28.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.28.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.28.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.28.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.28.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.28.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.29.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.29.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.29.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.29.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.29.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.29.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.30.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), 
('base_model.model.single_blocks.30.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.30.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.30.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.30.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.30.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.31.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.31.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.31.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.31.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.31.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.31.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.32.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.32.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.32.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.32.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.32.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.32.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.33.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.33.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.33.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.33.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.33.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.33.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.34.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.34.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.34.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.34.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.34.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.34.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.35.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.35.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.35.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.35.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.35.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.35.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), 
('base_model.model.single_blocks.36.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.36.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.36.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.36.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.36.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.36.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64])), ('base_model.model.single_blocks.37.linear1.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.37.linear1.lora_B.0_SwiftLoRA.weight', torch.Size([21504, 64])), ('base_model.model.single_blocks.37.linear2.lora_A.0_SwiftLoRA.weight', torch.Size([64, 15360])), ('base_model.model.single_blocks.37.linear2.lora_B.0_SwiftLoRA.weight', torch.Size([3072, 64])), ('base_model.model.single_blocks.37.modulation.lin.lora_A.0_SwiftLoRA.weight', torch.Size([64, 3072])), ('base_model.model.single_blocks.37.modulation.lin.lora_B.0_SwiftLoRA.weight', torch.Size([9216, 64]))]
scepter [INFO] 2025-07-04 03:14:45,092 [File: diffusion_solver.py Function: print_model_params_status at line 996] Load trainable params 306315264 / 17178094051 = 1.78%, train part: {'model.double_blocks': 171835392, 'model.single_blocks': 134479872}.
scepter [INFO] 2025-07-04 03:14:45,092 [File: diffusion_solver.py Function: print_model_params_status at line 1000] Load frozen params 16871778787 / 17178094051 = 98.22%, frozen part: {'model': 11902587968, 'first_stage_model': 83819683, 'cond_stage_model': 4885371136}.
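The 1.78% trainable fraction above follows directly from the LoRA shapes in the preceding state-dict listing: every adapted projection carries a rank-64 A/B pair costing r * (in_features + out_features) weights. A minimal sketch that re-derives the count (rank r = 64, hidden size 3072, and the block counts are read off this log, not taken from the ACE++ sources):

# Reproduce "Load trainable params 306315264 ... = 1.78%" from the shapes above.
r = 64
d = 3072  # hidden size of the Flux transformer

double_block = 2 * (            # img_* and txt_* branches are identical
    r * (d + 6 * d)             # mod.lin: 3072 -> 18432
    + r * (d + 3 * d)           # attn.qkv: 3072 -> 9216
    + r * (d + d)               # attn.proj: 3072 -> 3072
    + r * (d + 4 * d)           # mlp.0: 3072 -> 12288
    + r * (4 * d + d)           # mlp.2: 12288 -> 3072
)
single_block = (
    r * (d + 7 * d)             # linear1: 3072 -> 21504
    + r * (5 * d + d)           # linear2: 15360 -> 3072
    + r * (d + 3 * d)           # modulation.lin: 3072 -> 9216
)
trainable = 19 * double_block + 38 * single_block
print(trainable)                          # 306315264
print(f"{trainable / 17178094051:.2%}")   # 1.78%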
scepter [INFO] 2025-07-04 03:14:54,914 [File: diffusion_solver.py Function: set_up at line 230] SwiftModel(
  (base_model): LatentDiffusionACEPlus LatentDiffusionACEPlus(
    (model): FluxMRModiACEPlus FluxMRModiACEPlus(
      (pe_embedder): EmbedND()
      (img_in): Linear(in_features=448, out_features=3072, bias=True)
      (time_in): MLPEmbedder(
        (in_layer): Linear(in_features=256, out_features=3072, bias=True)
        (silu): SiLU()
        (out_layer): Linear(in_features=3072, out_features=3072, bias=True)
      )
      (vector_in): MLPEmbedder(
        (in_layer): Linear(in_features=768, out_features=3072, bias=True)
        (silu): SiLU()
        (out_layer): Linear(in_features=3072, out_features=3072, bias=True)
      )
      (guidance_in): MLPEmbedder(
        (in_layer): Linear(in_features=256, out_features=3072, bias=True)
        (silu): SiLU()
        (out_layer): Linear(in_features=3072, out_features=3072, bias=True)
      )
      (txt_in): Linear(in_features=4096, out_features=3072, bias=True)
      (double_blocks): ModuleList(
        (0-18): 19 x DoubleStreamBlock(
          (img_mod): Modulation(
            (lin): lora.Linear(
              (base_layer): Linear(in_features=3072, out_features=18432, bias=True)
              (lora_dropout): ModuleDict(
                (0_SwiftLoRA): Identity()
              )
              (lora_A): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=3072, out_features=64, bias=False)
              )
              (lora_B): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=64, out_features=18432, bias=False)
              )
              (lora_embedding_A): ParameterDict()
              (lora_embedding_B): ParameterDict()
              (lora_magnitude_vector): ModuleDict()
            )
          )
          (img_norm1): LayerNorm((3072,), eps=1e-06, elementwise_affine=False)
          (img_attn): SelfAttention(
            (qkv): lora.Linear(
              (base_layer): Linear(in_features=3072, out_features=9216, bias=True)
              (lora_dropout): ModuleDict(
                (0_SwiftLoRA): Identity()
              )
              (lora_A): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=3072, out_features=64, bias=False)
              )
              (lora_B): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=64, out_features=9216, bias=False)
              )
              (lora_embedding_A): ParameterDict()
              (lora_embedding_B): ParameterDict()
              (lora_magnitude_vector): ModuleDict()
            )
            (norm): QKNorm(
              (query_norm): RMSNorm()
              (key_norm): RMSNorm()
            )
            (proj): lora.Linear(
              (base_layer): Linear(in_features=3072, out_features=3072, bias=True)
              (lora_dropout): ModuleDict(
                (0_SwiftLoRA): Identity()
              )
              (lora_A): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=3072, out_features=64, bias=False)
              )
              (lora_B): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=64, out_features=3072, bias=False)
              )
              (lora_embedding_A): ParameterDict()
              (lora_embedding_B): ParameterDict()
              (lora_magnitude_vector): ModuleDict()
            )
          )
          (img_norm2): LayerNorm((3072,), eps=1e-06, elementwise_affine=False)
          (img_mlp): Sequential(
            (0): lora.Linear(
              (base_layer): Linear(in_features=3072, out_features=12288, bias=True)
              (lora_dropout): ModuleDict(
                (0_SwiftLoRA): Identity()
              )
              (lora_A): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=3072, out_features=64, bias=False)
              )
              (lora_B): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=64, out_features=12288, bias=False)
              )
              (lora_embedding_A): ParameterDict()
              (lora_embedding_B): ParameterDict()
              (lora_magnitude_vector): ModuleDict()
            )
            (1): GELU(approximate='tanh')
            (2): lora.Linear(
              (base_layer): Linear(in_features=12288, out_features=3072, bias=True)
              (lora_dropout): ModuleDict(
                (0_SwiftLoRA): Identity()
              )
              (lora_A): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=12288, out_features=64, bias=False)
              )
              (lora_B): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=64, out_features=3072, bias=False)
              )
              (lora_embedding_A): ParameterDict()
              (lora_embedding_B): ParameterDict()
              (lora_magnitude_vector): ModuleDict()
            )
          )
          (txt_mod): Modulation(
            (lin): lora.Linear(
              (base_layer): Linear(in_features=3072, out_features=18432, bias=True)
              (lora_dropout): ModuleDict(
                (0_SwiftLoRA): Identity()
              )
              (lora_A): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=3072, out_features=64, bias=False)
              )
              (lora_B): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=64, out_features=18432, bias=False)
              )
              (lora_embedding_A): ParameterDict()
              (lora_embedding_B): ParameterDict()
              (lora_magnitude_vector): ModuleDict()
            )
          )
          (txt_norm1): LayerNorm((3072,), eps=1e-06, elementwise_affine=False)
          (txt_attn): SelfAttention(
            (qkv): lora.Linear(
              (base_layer): Linear(in_features=3072, out_features=9216, bias=True)
              (lora_dropout): ModuleDict(
                (0_SwiftLoRA): Identity()
              )
              (lora_A): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=3072, out_features=64, bias=False)
              )
              (lora_B): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=64, out_features=9216, bias=False)
              )
              (lora_embedding_A): ParameterDict()
              (lora_embedding_B): ParameterDict()
              (lora_magnitude_vector): ModuleDict()
            )
            (norm): QKNorm(
              (query_norm): RMSNorm()
              (key_norm): RMSNorm()
            )
            (proj): lora.Linear(
              (base_layer): Linear(in_features=3072, out_features=3072, bias=True)
              (lora_dropout): ModuleDict(
                (0_SwiftLoRA): Identity()
              )
              (lora_A): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=3072, out_features=64, bias=False)
              )
              (lora_B): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=64, out_features=3072, bias=False)
              )
              (lora_embedding_A): ParameterDict()
              (lora_embedding_B): ParameterDict()
              (lora_magnitude_vector): ModuleDict()
            )
          )
          (txt_norm2): LayerNorm((3072,), eps=1e-06, elementwise_affine=False)
          (txt_mlp): Sequential(
            (0): lora.Linear(
              (base_layer): Linear(in_features=3072, out_features=12288, bias=True)
              (lora_dropout): ModuleDict(
                (0_SwiftLoRA): Identity()
              )
              (lora_A): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=3072, out_features=64, bias=False)
              )
              (lora_B): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=64, out_features=12288, bias=False)
              )
              (lora_embedding_A): ParameterDict()
              (lora_embedding_B): ParameterDict()
              (lora_magnitude_vector): ModuleDict()
            )
            (1): GELU(approximate='tanh')
            (2): lora.Linear(
              (base_layer): Linear(in_features=12288, out_features=3072, bias=True)
              (lora_dropout): ModuleDict(
                (0_SwiftLoRA): Identity()
              )
              (lora_A): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=12288, out_features=64, bias=False)
              )
              (lora_B): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=64, out_features=3072, bias=False)
              )
              (lora_embedding_A): ParameterDict()
              (lora_embedding_B): ParameterDict()
              (lora_magnitude_vector): ModuleDict()
            )
          )
        )
      )
      (single_blocks): ModuleList(
        (0-37): 38 x SingleStreamBlock(
          (linear1): lora.Linear(
            (base_layer): Linear(in_features=3072, out_features=21504, bias=True)
            (lora_dropout): ModuleDict(
              (0_SwiftLoRA): Identity()
            )
            (lora_A): ModuleDict(
              (0_SwiftLoRA): Linear(in_features=3072, out_features=64, bias=False)
            )
            (lora_B): ModuleDict(
              (0_SwiftLoRA): Linear(in_features=64, out_features=21504, bias=False)
            )
            (lora_embedding_A): ParameterDict()
            (lora_embedding_B): ParameterDict()
            (lora_magnitude_vector): ModuleDict()
          )
          (linear2): lora.Linear(
            (base_layer): Linear(in_features=15360, out_features=3072, bias=True)
            (lora_dropout): ModuleDict(
              (0_SwiftLoRA): Identity()
            )
            (lora_A): ModuleDict(
              (0_SwiftLoRA): Linear(in_features=15360, out_features=64, bias=False)
            )
            (lora_B): ModuleDict(
              (0_SwiftLoRA): Linear(in_features=64, out_features=3072, bias=False)
            )
            (lora_embedding_A): ParameterDict()
            (lora_embedding_B): ParameterDict()
            (lora_magnitude_vector): ModuleDict()
          )
          (norm): QKNorm(
            (query_norm): RMSNorm()
            (key_norm): RMSNorm()
          )
          (pre_norm): LayerNorm((3072,), eps=1e-06, elementwise_affine=False)
          (mlp_act): GELU(approximate='tanh')
          (modulation): Modulation(
            (lin): lora.Linear(
              (base_layer): Linear(in_features=3072, out_features=9216, bias=True)
              (lora_dropout): ModuleDict(
                (0_SwiftLoRA): Identity()
              )
              (lora_A): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=3072, out_features=64, bias=False)
              )
              (lora_B): ModuleDict(
                (0_SwiftLoRA): Linear(in_features=64, out_features=9216, bias=False)
              )
              (lora_embedding_A): ParameterDict()
              (lora_embedding_B): ParameterDict()
              (lora_magnitude_vector): ModuleDict()
            )
          )
        )
      )
      (final_layer): LastLayer(
        (norm_final): LayerNorm((3072,), eps=1e-06, elementwise_affine=False)
        (linear): Linear(in_features=3072, out_features=64, bias=True)
        (adaLN_modulation): Sequential(
          (0): SiLU()
          (1): Linear(in_features=3072, out_features=6144, bias=True)
        )
      )
    )
    (first_stage_model): AutoencoderKLFlux AutoencoderKLFlux(
      (encoder): Encoder Encoder(
        (conv_in): Conv2d(3, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (down): ModuleList(
          (0): Module(
            (block): ModuleList(
              (0-1): 2 x ResnetBlock(
                (norm1): GroupNorm(32, 128, eps=1e-06, affine=True)
                (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (norm2): GroupNorm(32, 128, eps=1e-06, affine=True)
                (dropout): Dropout(p=0.0, inplace=False)
                (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
              )
            )
            (attn): ModuleList()
            (downsample): Downsample(
              (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2))
            )
          )
          (1): Module(
            (block): ModuleList(
              (0): ResnetBlock(
                (norm1): GroupNorm(32, 128, eps=1e-06, affine=True)
                (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (norm2): GroupNorm(32, 256, eps=1e-06, affine=True)
                (dropout): Dropout(p=0.0, inplace=False)
                (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (nin_shortcut): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1))
              )
              (1): ResnetBlock(
                (norm1): GroupNorm(32, 256, eps=1e-06, affine=True)
                (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (norm2): GroupNorm(32, 256, eps=1e-06, affine=True)
                (dropout): Dropout(p=0.0, inplace=False)
                (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
              )
            )
            (attn): ModuleList()
            (downsample): Downsample(
              (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2))
            )
          )
          (2): Module(
            (block): ModuleList(
              (0): ResnetBlock(
                (norm1): GroupNorm(32, 256, eps=1e-06, affine=True)
                (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (norm2): GroupNorm(32, 512, eps=1e-06, affine=True)
                (dropout): Dropout(p=0.0, inplace=False)
                (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (nin_shortcut): Conv2d(256, 512, kernel_size=(1, 1), stride=(1, 1))
              )
              (1): ResnetBlock(
                (norm1): GroupNorm(32, 512, eps=1e-06, affine=True)
                (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (norm2): GroupNorm(32, 512, eps=1e-06, affine=True)
                (dropout): Dropout(p=0.0, inplace=False)
                (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
              )
            )
            (attn): ModuleList()
            (downsample): Downsample(
              (conv): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2))
            )
          )
          (3): Module(
            (block): ModuleList(
              (0-1): 2 x ResnetBlock(
                (norm1): GroupNorm(32, 512, eps=1e-06, affine=True)
                (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (norm2): GroupNorm(32, 512, eps=1e-06, affine=True)
                (dropout): Dropout(p=0.0, inplace=False)
                (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
              )
            )
            (attn): ModuleList()
          )
        )
        (mid): Module(
          (block_1): ResnetBlock(
            (norm1): GroupNorm(32, 512, eps=1e-06, affine=True)
            (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (norm2): GroupNorm(32, 512, eps=1e-06, affine=True)
            (dropout): Dropout(p=0.0, inplace=False)
            (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
          (attn_1): MemoryEfficientAttention(
            (norm): GroupNorm(32, 512, eps=1e-06, affine=True)
            (q): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1))
            (k): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1))
            (v): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1))
            (proj_out): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1))
          )
          (block_2): ResnetBlock(
            (norm1): GroupNorm(32, 512, eps=1e-06, affine=True)
            (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (norm2): GroupNorm(32, 512, eps=1e-06, affine=True)
            (dropout): Dropout(p=0.0, inplace=False)
            (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
        )
        (norm_out): GroupNorm(32, 512, eps=1e-06, affine=True)
        (conv_out): Conv2d(512, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (decoder): Decoder Decoder(
        (conv_in): Conv2d(16, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (mid): Module(
          (block_1): ResnetBlock(
            (norm1): GroupNorm(32, 512, eps=1e-06, affine=True)
            (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (norm2): GroupNorm(32, 512, eps=1e-06, affine=True)
            (dropout): Dropout(p=0.0, inplace=False)
            (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
          (attn_1): MemoryEfficientAttention(
            (norm): GroupNorm(32, 512, eps=1e-06, affine=True)
            (q): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1))
            (k): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1))
            (v): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1))
            (proj_out): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1))
          )
          (block_2): ResnetBlock(
            (norm1): GroupNorm(32, 512, eps=1e-06, affine=True)
            (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (norm2): GroupNorm(32, 512, eps=1e-06, affine=True)
            (dropout): Dropout(p=0.0, inplace=False)
            (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
        )
        (up): ModuleList(
          (0): Module(
            (block): ModuleList(
              (0): ResnetBlock(
                (norm1): GroupNorm(32, 256, eps=1e-06, affine=True)
                (conv1): Conv2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (norm2): GroupNorm(32, 128, eps=1e-06, affine=True)
                (dropout): Dropout(p=0.0, inplace=False)
                (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (nin_shortcut): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1))
              )
              (1-2): 2 x ResnetBlock(
                (norm1): GroupNorm(32, 128, eps=1e-06, affine=True)
                (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (norm2): GroupNorm(32, 128, eps=1e-06, affine=True)
                (dropout): Dropout(p=0.0, inplace=False)
                (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
              )
            )
            (attn): ModuleList()
          )
          (1): Module(
            (block): ModuleList(
              (0): ResnetBlock(
                (norm1): GroupNorm(32, 512, eps=1e-06, affine=True)
                (conv1): Conv2d(512, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (norm2): GroupNorm(32, 256, eps=1e-06, affine=True)
                (dropout): Dropout(p=0.0, inplace=False)
                (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (nin_shortcut): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1))
              )
              (1-2): 2 x ResnetBlock(
                (norm1): GroupNorm(32, 256, eps=1e-06, affine=True)
                (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (norm2): GroupNorm(32, 256, eps=1e-06, affine=True)
                (dropout): Dropout(p=0.0, inplace=False)
                (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
              )
            )
            (attn): ModuleList()
            (upsample): Upsample(
              (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            )
          )
          (2-3): 2 x Module(
            (block): ModuleList(
              (0-2): 3 x ResnetBlock(
                (norm1): GroupNorm(32, 512, eps=1e-06, affine=True)
                (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (norm2): GroupNorm(32, 512, eps=1e-06, affine=True)
                (dropout): Dropout(p=0.0, inplace=False)
                (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
              )
            )
            (attn): ModuleList()
            (upsample): Upsample(
              (conv): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            )
          )
        )
        (norm_out): GroupNorm(32, 128, eps=1e-06, affine=True)
        (conv_out): Conv2d(128, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (conv1): Identity()
      (conv2): Identity()
    )
    (cond_stage_model): T5ACEPlusClipFluxEmbedder T5ACEPlusClipFluxEmbedder(
      (t5_model): ACEHFEmbedder ACEHFEmbedder(
        (hf_module): T5EncoderModel(
          (shared): Embedding(32128, 4096)
          (encoder): T5Stack(
            (embed_tokens): Embedding(32128, 4096)
            (block): ModuleList(
              (0): T5Block(
                (layer): ModuleList(
                  (0): T5LayerSelfAttention(
                    (SelfAttention): T5Attention(
                      (q): Linear(in_features=4096, out_features=4096, bias=False)
                      (k): Linear(in_features=4096, out_features=4096, bias=False)
                      (v): Linear(in_features=4096, out_features=4096, bias=False)
                      (o): Linear(in_features=4096, out_features=4096, bias=False)
                      (relative_attention_bias): Embedding(32, 64)
                    )
                    (layer_norm): T5LayerNorm()
                    (dropout): Dropout(p=0.1, inplace=False)
                  )
                  (1): T5LayerFF(
                    (DenseReluDense): T5DenseGatedActDense(
                      (wi_0): Linear(in_features=4096, out_features=10240, bias=False)
                      (wi_1): Linear(in_features=4096, out_features=10240, bias=False)
                      (wo): Linear(in_features=10240, out_features=4096, bias=False)
                      (dropout): Dropout(p=0.1, inplace=False)
                      (act): NewGELUActivation()
                    )
                    (layer_norm): T5LayerNorm()
                    (dropout): Dropout(p=0.1, inplace=False)
                  )
                )
              )
              (1-23): 23 x T5Block(
                (layer): ModuleList(
                  (0): T5LayerSelfAttention(
                    (SelfAttention): T5Attention(
                      (q): Linear(in_features=4096, out_features=4096, bias=False)
                      (k): Linear(in_features=4096, out_features=4096, bias=False)
                      (v): Linear(in_features=4096, out_features=4096, bias=False)
                      (o): Linear(in_features=4096, out_features=4096, bias=False)
                    )
                    (layer_norm): T5LayerNorm()
                    (dropout): Dropout(p=0.1, inplace=False)
                  )
                  (1): T5LayerFF(
                    (DenseReluDense): T5DenseGatedActDense(
                      (wi_0): Linear(in_features=4096, out_features=10240, bias=False)
                      (wi_1): Linear(in_features=4096, out_features=10240, bias=False)
                      (wo): Linear(in_features=10240, out_features=4096, bias=False)
                      (dropout): Dropout(p=0.1, inplace=False)
                      (act): NewGELUActivation()
                    )
                    (layer_norm): T5LayerNorm()
                    (dropout): Dropout(p=0.1, inplace=False)
                  )
                )
              )
            )
            (final_layer_norm): T5LayerNorm()
            (dropout): Dropout(p=0.1, inplace=False)
          )
        )
      )
      (clip_model): ACEHFEmbedder ACEHFEmbedder(
        (hf_module): CLIPTextModel(
          (text_model): CLIPTextTransformer(
            (embeddings): CLIPTextEmbeddings(
              (token_embedding): Embedding(49408, 768)
              (position_embedding): Embedding(77, 768)
            )
            (encoder): CLIPEncoder(
              (layers): ModuleList(
                (0-11): 12 x CLIPEncoderLayer(
                  (self_attn): CLIPSdpaAttention(
                    (k_proj): Linear(in_features=768, out_features=768, bias=True)
                    (v_proj): Linear(in_features=768, out_features=768, bias=True)
                    (q_proj): Linear(in_features=768, out_features=768, bias=True)
                    (out_proj): Linear(in_features=768, out_features=768, bias=True)
                  )
                  (layer_norm1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
                  (mlp): CLIPMLP(
                    (activation_fn): QuickGELUActivation()
                    (fc1): Linear(in_features=768, out_features=3072, bias=True)
                    (fc2): Linear(in_features=3072, out_features=768, bias=True)
                  )
                  (layer_norm2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
                )
              )
            )
            (final_layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
          )
        )
      )
    )
  )
)
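Every lora.Linear printed above wraps a frozen base projection with a rank-64 bypass: the base_layer contributes to the 98.22% frozen count, while only the lora_A/lora_B pairs train. As a rough sketch of what such a module computes at forward time (standard PEFT-style behaviour with scaling = alpha / r; the exact swift/ACE++ wiring may differ in details):

import torch
import torch.nn as nn

class LoRALinearSketch(nn.Module):
    # Minimal stand-in for the lora.Linear modules above:
    # a frozen base projection plus a rank-r update B(A(x)) * scaling.
    def __init__(self, in_features, out_features, r=64, alpha=64):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features, bias=True)
        self.base_layer.requires_grad_(False)                  # frozen
        self.lora_A = nn.Linear(in_features, r, bias=False)    # trainable
        self.lora_B = nn.Linear(r, out_features, bias=False)   # trainable
        nn.init.zeros_(self.lora_B.weight)                     # start as identity update
        self.scaling = alpha / r

    def forward(self, x):
        return self.base_layer(x) + self.lora_B(self.lora_A(x)) * self.scaling

qkv = LoRALinearSketch(3072, 9216)   # mirrors double_blocks.*.img_attn.qkv
y = qkv(torch.randn(2, 3072))
print(y.shape)                       # torch.Size([2, 9216])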
scepter [INFO] 2025-07-04 03:14:54,956 [File: log.py Function: before_solve at line 260] Tensorboard: save to ./examples/exp_example/20250704031317/tensorboard
scepter [INFO] 2025-07-04 03:26:47,746 [File: log.py Function: _print_iter_log at line 71] Stage [train] iter: [20/100000], data_time: 29.7654(29.7654), time: 35.6384(35.6384), loss: 0.1792(0.1792), throughput: 23616/day, all_throughput: 20, pg0_lr: 0.001000, scale: 1.000000, [13mins 28secs 0.02%(46days 18hours 54mins 17secs)]
scepter [INFO] 2025-07-04 03:38:34,905 [File: log.py Function: _print_iter_log at line 71] Stage [train] iter: [40/100000], data_time: 29.6163(29.6909), time: 35.3580(35.4982), loss: 0.1530(0.1661), throughput: 23655/day, all_throughput: 40, pg0_lr: 0.001000, scale: 1.000000, [25mins 15secs 0.04%(43days 20hours 13mins 35secs)]
scepter [INFO] 2025-07-04 03:44:27,364 [File: checkpoint.py Function: after_iter at line 109] Saving checkpoint after 50 steps
scepter [INFO] 2025-07-04 03:50:16,375 [File: log.py Function: _print_iter_log at line 71] Stage [train] iter: [60/100000], data_time: 29.3468(29.5762), time: 35.0735(35.3566), loss: 0.1463(0.1595), throughput: 23738/day, all_throughput: 60, pg0_lr: 0.001000, scale: 1.000000, [36mins 57secs 0.06%(42days 17hours 54mins 12secs)]
scepter [INFO] 2025-07-04 04:02:09,647 [File: log.py Function: _print_iter_log at line 71] Stage [train] iter: [80/100000], data_time: 29.9454(29.6685), time: 35.6637(35.4334), loss: 0.1684(0.1617), throughput: 23803/day, all_throughput: 80, pg0_lr: 0.001000, scale: 1.000000, [48mins 50secs 0.08%(42days 8hours 44mins 22secs)]
scepter [INFO] 2025-07-04 04:14:04,519 [File: log.py Function: _print_iter_log at line 71] Stage [train] iter: [100/100000], data_time: 29.8545(29.7057), time: 35.7436(35.4954), loss: 0.1803(0.1654), throughput: 23744/day, all_throughput: 100, pg0_lr: 0.001000, scale: 1.000000, [1hours 45secs 0.10%(42days 3hours 36mins 20secs)]
scepter [INFO] 2025-07-04 04:14:04,521 [File: checkpoint.py Function: after_iter at line 109] Saving checkpoint after 100 steps
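Each metric in the iteration lines above appears to be printed as instantaneous(running average), e.g. loss: 0.1803(0.1654) at iter 100. A hypothetical helper for pulling the loss curve out of such a std_log.txt (the regex is written against the lines above, not against any scepter API):

import re

# Matches e.g. "iter: [20/100000], ... loss: 0.1792(0.1792), ..."
PATTERN = re.compile(r"iter: \[(\d+)/\d+\].*?loss: ([\d.]+)\(([\d.]+)\)")

def parse_iter_lines(lines):
    for line in lines:
        m = PATTERN.search(line)
        if m:
            yield int(m.group(1)), float(m.group(2)), float(m.group(3))

with open("std_log.txt") as f:
    for step, loss, avg in parse_iter_lines(f):
        print(step, loss, avg)   # 20 0.1792 0.1792 ... 100 0.1803 0.1654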
|
ACE_plus/examples/exp_example/20250704031317/train.yaml
ADDED
@@ -0,0 +1,288 @@
ENV:
  BACKEND: nccl
  SEED: 1999
SOLVER:
  # NAME DESCRIPTION: TYPE: default: 'LatentUfitSolver'
  NAME: FormalACEPlusSolver
  # MAX_STEPS DESCRIPTION: The total steps for training. TYPE: int default: 100000
  MAX_STEPS: 100000
  # USE_AMP DESCRIPTION: Use amp to support mixed precision or not, default is False. TYPE: bool default: False
  USE_AMP: True
  # DTYPE DESCRIPTION: The precision for training. TYPE: str default: 'float32'
  DTYPE: bfloat16
  ENABLE_GRADSCALER: False
  # USE_FAIRSCALE DESCRIPTION: Use fairscale as the backend of ddp, default False. TYPE: bool default: False
  USE_FAIRSCALE: False
  USE_ORIG_PARAMS: True
  USE_FSDP: True  # LoRA uses DDP (USE_FSDP: False); otherwise use FSDP (USE_FSDP: True)
  # LOAD_MODEL_ONLY DESCRIPTION: Only load the model rather than the optimizer and schedule, default is False. TYPE: bool default: False
  LOAD_MODEL_ONLY: False
  # RESUME_FROM DESCRIPTION: Resume from some state of training. TYPE: str default: ''
  RESUME_FROM:
  # WORK_DIR DESCRIPTION: Save dir of the training log or model. TYPE: str default: ''
  WORK_DIR: ./examples/exp_example/
  # LOG_FILE DESCRIPTION: Save log path. TYPE: str default: ''
  LOG_FILE: std_log.txt
  # LOG_TRAIN_NUM DESCRIPTION: The number of samples used to log in the training phase. TYPE: int default: -1
  LOG_TRAIN_NUM: 16
  # FSDP_REDUCE_DTYPE DESCRIPTION: The dtype of reduce in FSDP. TYPE: str default: 'float16'
  FSDP_REDUCE_DTYPE: float32
  # FSDP_BUFFER_DTYPE DESCRIPTION: The dtype of buffer in FSDP. TYPE: str default: 'float16'
  FSDP_BUFFER_DTYPE: float32
  # FSDP_SHARD_MODULES DESCRIPTION: The modules to be sharded in FSDP. TYPE: list default: ['model']
  FSDP_SHARD_MODULES:
    - MODULE: 'model.model'
      FSDP_GROUP: [ 'single_blocks', 'double_blocks' ]
    - MODULE: 'cond_stage_model.t5_model.hf_module.encoder'
      FSDP_GROUP: [ 'block' ]
  SAVE_MODULES: [ 'model' ]
  TRAIN_MODULES: [ 'model' ]

  #
  FILE_SYSTEM:
    - NAME: HuggingfaceFs
      TEMP_DIR: ./cache
    - NAME: ModelscopeFs
      TEMP_DIR: ./cache
  #
  MODEL:
    NAME: LatentDiffusionACEPlus
    PARAMETERIZATION: rf
    TIMESTEPS: 1000
    GUIDE_SCALE: 1.0
    PRETRAINED_MODEL:
    IGNORE_KEYS: [ ]
    USE_EMA: False
    EVAL_EMA: False
    SIZE_FACTOR: 8
    DIFFUSION:
      NAME: DiffusionFluxRF
      PREDICTION_TYPE: raw
      NOISE_NORM: True
      # NOISE_SCHEDULER DESCRIPTION: TYPE: default: ''
      NOISE_SCHEDULER:
        NAME: FlowMatchFluxShiftScheduler
        SHIFT: False
        PRE_T_SAMPLE: True
        PRE_T_SAMPLE_FOLD: 1
        SIGMOID_SCALE: 1
        BASE_SHIFT: 0.5
        MAX_SHIFT: 1.15
      SAMPLER_SCHEDULER:
        NAME: FlowMatchFluxShiftScheduler
        SHIFT: True
        PRE_T_SAMPLE: False
        SIGMOID_SCALE: 1
        BASE_SHIFT: 0.5
        MAX_SHIFT: 1.15

    #
    DIFFUSION_MODEL:
      # NAME DESCRIPTION: TYPE: default: 'Flux'
      NAME: FluxMRModiACEPlus
      PRETRAINED_MODEL: /home/Ubuntu/Downloads/Unmodel/Reference_models/flux1-fill-dev.safetensors
      # IN_CHANNELS DESCRIPTION: model's input channels. TYPE: int default: 64
      IN_CHANNELS: 448
      # OUT_CHANNELS DESCRIPTION: model's output channels. TYPE: int default: 64
      OUT_CHANNELS: 64
      # HIDDEN_SIZE DESCRIPTION: model's hidden size. TYPE: int default: 1024
      HIDDEN_SIZE: 3072
      REDUX_DIM: 1152
      # NUM_HEADS DESCRIPTION: number of heads in the transformer. TYPE: int default: 16
      NUM_HEADS: 24
      # AXES_DIM DESCRIPTION: dimensions of the axes of the positional encoding. TYPE: list default: [16, 56, 56]
      AXES_DIM: [ 16, 56, 56 ]
      # THETA DESCRIPTION: theta for positional encoding. TYPE: int default: 10000
      THETA: 10000
      # VEC_IN_DIM DESCRIPTION: dimension of the vector input. TYPE: int default: 768
      VEC_IN_DIM: 768
      # GUIDANCE_EMBED DESCRIPTION: whether to use guidance embedding. TYPE: bool default: False
      GUIDANCE_EMBED: True
      # CONTEXT_IN_DIM DESCRIPTION: dimension of the context input. TYPE: int default: 4096
      CONTEXT_IN_DIM: 4096
      # MLP_RATIO DESCRIPTION: ratio of mlp hidden size to hidden size. TYPE: float default: 4.0
      MLP_RATIO: 4.0
      # QKV_BIAS DESCRIPTION: whether to use bias in qkv projection. TYPE: bool default: True
      QKV_BIAS: True
      # DEPTH DESCRIPTION: number of transformer blocks. TYPE: int default: 19
      DEPTH: 19
      # DEPTH_SINGLE_BLOCKS DESCRIPTION: number of transformer blocks in the single stream block. TYPE: int default: 38
      DEPTH_SINGLE_BLOCKS: 38
      # ATTN_BACKEND: set 'flash_attn' to use FlashAttention-2; if the PyTorch version is > 2.4.0, 'pytorch' uses PyTorch's own implementation
      ATTN_BACKEND: flash_attn
      # USE_GRAD_CHECKPOINT: setting gradient checkpointing to True decreases memory usage.
      USE_GRAD_CHECKPOINT: True

    #
    FIRST_STAGE_MODEL:
      NAME: AutoencoderKLFlux
      EMBED_DIM: 16
      PRETRAINED_MODEL: /home/Ubuntu/Downloads/Unmodel/Reference_models/ae.safetensors
      IGNORE_KEYS: [ ]
      BATCH_SIZE: 8
      USE_CONV: False
      SCALE_FACTOR: 0.3611
      SHIFT_FACTOR: 0.1159
      #
      ENCODER:
        NAME: Encoder
        CH: 128
        OUT_CH: 3
        NUM_RES_BLOCKS: 2
        IN_CHANNELS: 3
        ATTN_RESOLUTIONS: [ ]
        CH_MULT: [ 1, 2, 4, 4 ]
        Z_CHANNELS: 16
        DOUBLE_Z: True
        DROPOUT: 0.0
        RESAMP_WITH_CONV: True
      #
      DECODER:
        NAME: Decoder
        CH: 128
        OUT_CH: 3
        NUM_RES_BLOCKS: 2
        IN_CHANNELS: 3
        ATTN_RESOLUTIONS: [ ]
        CH_MULT: [ 1, 2, 4, 4 ]
        Z_CHANNELS: 16
        DROPOUT: 0.0
        RESAMP_WITH_CONV: True
        GIVE_PRE_END: False
        TANH_OUT: False
    #
    COND_STAGE_MODEL:
      # NAME DESCRIPTION: TYPE: default: 'T5PlusClipFluxEmbedder'
      NAME: T5ACEPlusClipFluxEmbedder
      # T5_MODEL DESCRIPTION: TYPE: default: ''
      T5_MODEL:
        # NAME DESCRIPTION: TYPE: default: 'HFEmbedder'
        NAME: ACEHFEmbedder
        # HF_MODEL_CLS DESCRIPTION: huggingface cls in transformers TYPE: NoneType default: None
        HF_MODEL_CLS: T5EncoderModel
        # MODEL_PATH DESCRIPTION: model folder path TYPE: NoneType default: None
        MODEL_PATH: /home/Ubuntu/Downloads/Unmodel/Reference_models/t5_xxl/
        # HF_TOKENIZER_CLS DESCRIPTION: huggingface cls in transformers TYPE: NoneType default: None
        HF_TOKENIZER_CLS: T5Tokenizer
        # TOKENIZER_PATH DESCRIPTION: tokenizer folder path TYPE: NoneType default: None
        TOKENIZER_PATH: /home/Ubuntu/Downloads/Unmodel/Reference_models/t5_xxl/
        ADDED_IDENTIFIER: [ '<img>','{image}', '{caption}', '{mask}', '{ref_image}', '{image1}', '{image2}', '{image3}', '{image4}', '{image5}', '{image6}', '{image7}', '{image8}', '{image9}' ]
        # MAX_LENGTH DESCRIPTION: max length of input TYPE: int default: 77
        MAX_LENGTH: 512
        # OUTPUT_KEY DESCRIPTION: output key TYPE: str default: 'last_hidden_state'
        OUTPUT_KEY: last_hidden_state
        # D_TYPE DESCRIPTION: dtype TYPE: str default: 'bfloat16'
        D_TYPE: bfloat16
        # BATCH_INFER DESCRIPTION: batch infer TYPE: bool default: False
        BATCH_INFER: False
        CLEAN: whitespace
      # CLIP_MODEL DESCRIPTION: TYPE: default: ''
      CLIP_MODEL:
        # NAME DESCRIPTION: TYPE: default: 'HFEmbedder'
        NAME: ACEHFEmbedder
        # HF_MODEL_CLS DESCRIPTION: huggingface cls in transformers TYPE: NoneType default: None
        HF_MODEL_CLS: CLIPTextModel
        # MODEL_PATH DESCRIPTION: model folder path TYPE: NoneType default: None
        MODEL_PATH: /home/Ubuntu/Downloads/Unmodel/Reference_models/clip_l/
        # HF_TOKENIZER_CLS DESCRIPTION: huggingface cls in transformers TYPE: NoneType default: None
        HF_TOKENIZER_CLS: CLIPTokenizer
        # TOKENIZER_PATH DESCRIPTION: tokenizer folder path TYPE: NoneType default: None
        TOKENIZER_PATH: /home/Ubuntu/Downloads/Unmodel/Reference_models/clip_l/
        # MAX_LENGTH DESCRIPTION: max length of input TYPE: int default: 77
        MAX_LENGTH: 77
        # OUTPUT_KEY DESCRIPTION: output key TYPE: str default: 'last_hidden_state'
        OUTPUT_KEY: pooler_output
        # D_TYPE DESCRIPTION: dtype TYPE: str default: 'bfloat16'
        D_TYPE: bfloat16
        # BATCH_INFER DESCRIPTION: batch infer TYPE: bool default: False
        BATCH_INFER: True
        CLEAN: whitespace
    TUNER:
      # THE LORA PARAMETERS
      - NAME: SwiftLoRA
        R: 64
        LORA_ALPHA: 64
        LORA_DROPOUT: 0.0
        BIAS: "none"
        TARGET_MODULES: "(model.double_blocks.*(.qkv|.img_mlp.0|.img_mlp.2|.txt_mlp.0|.txt_mlp.2|.proj|.img_mod.lin|.txt_mod.lin))|(model.single_blocks.*(.linear1|.linear2|.modulation.lin))$"
  #
  SAMPLE_ARGS:
    SAMPLE_STEPS: 28
    SAMPLER: flow_euler
    SEED: 42
    IMAGE_SIZE: [ 2048, 2048 ]
    #IMAGE_SIZE: [ 1024, 1024 ]
    GUIDE_SCALE: 50

  LR_SCHEDULER:
    NAME: StepAnnealingLR
    WARMUP_STEPS: 0
    TOTAL_STEPS: 100000
    DECAY_MODE: 'cosine'
  #
  OPTIMIZER:
    NAME: AdamW
    LEARNING_RATE: 1e-3
    BETAS: [ 0.9, 0.999 ]
    EPS: 1e-6
    WEIGHT_DECAY: 1e-2
    AMSGRAD: False
  #
  TRAIN_DATA:
    NAME: ACEPlusDataset
    MODE: train
    DATA_LIST: data/train.csv
    DELIMITER: "#;#"
    MODIFY_MODE: True
    # input_image, input_mask, input_reference_image, target_image, instruction, task_type
    FIELDS: [ "edit_image", "edit_mask", "ref_image", "target_image", "prompt", "data_type" ]
    PATH_PREFIX: ""
    EDIT_TYPE_LIST: [ ]
    MAX_SEQ_LEN: 4096
    # MAX_SEQ_LEN: 2048 -Vijay
    D: 16
    PIN_MEMORY: True
    BATCH_SIZE: 1
    NUM_WORKERS: 4
    SAMPLER:
      NAME: LoopSampler

  EVAL_DATA:
    NAME: ACEPlusDataset
    MODE: eval
    DATA_LIST: data/train.csv
    DELIMITER: "#;#"
    MODIFY_MODE: True
    # input_image, input_mask, input_reference_image, target_image, instruction, task_type
    FIELDS: [ "edit_image", "edit_mask", "ref_image", "target_image", "prompt", "data_type" ]
    PATH_PREFIX: ""
    EDIT_TYPE_LIST: [ ]
    MAX_SEQ_LEN: 4096
    # MAX_SEQ_LEN: 2048 -Vijay
    D: 16
    PIN_MEMORY: True
    BATCH_SIZE: 1
    NUM_WORKERS: 4

  TRAIN_HOOKS:
    - NAME: ACEBackwardHook
      GRADIENT_CLIP: 1.0
      PRIORITY: 10
    - NAME: LogHook
      LOG_INTERVAL: 20
    - NAME: ACECheckpointHook
      INTERVAL: 50
      #INTERVAL: 250 --Vijay
      PRIORITY: 200
      DISABLE_SNAPSHOT: True
    - NAME: ProbeDataHook
      PROB_INTERVAL: 10
      #PROB_INTERVAL: 50 -Vijay
      PRIORITY: 0
    - NAME: TensorboardLogHook
      LOG_INTERVAL: 50
  EVAL_HOOKS:
    - NAME: ProbeDataHook
      PROB_INTERVAL: 10
      #PROB_INTERVAL: 50 -Vijay
      PRIORITY: 0
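Two details in this config are easy to get wrong: the nesting has to parse as shown above, and the TUNER's TARGET_MODULES value is a Python regular expression matched against fully qualified module names. A minimal inspection sketch, assuming PyYAML is available (scepter consumes the file through its own Config reader; this only peeks at the raw YAML):

import re
import yaml  # PyYAML, assumed installed

with open("./examples/exp_example/20250704031317/train.yaml") as f:
    cfg = yaml.safe_load(f)

solver = cfg["SOLVER"]
# Note: YAML 1.1 resolvers read the unquoted 1e-3 as a string, not a float.
print(solver["NAME"], solver["MAX_STEPS"], solver["OPTIMIZER"]["LEARNING_RATE"])

# The LoRA tuner touches exactly the modules whose dotted names match this regex.
pattern = re.compile(solver["MODEL"]["TUNER"][0]["TARGET_MODULES"])
for name in ("model.double_blocks.0.img_attn.qkv",   # hypothetical module paths,
             "model.single_blocks.37.linear2",       # shaped like Flux block names
             "model.final_layer.linear"):
    print(name, "->", "tuned" if pattern.search(name) else "frozen")

The first two names match the double/single block patterns and would receive LoRA adapters; anything outside those blocks stays frozen.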
ACE_plus/flashenv/bin/Activate.ps1
ADDED
@@ -0,0 +1,247 @@
<#
.Synopsis
Activate a Python virtual environment for the current PowerShell session.

.Description
Pushes the python executable for a virtual environment to the front of the
$Env:PATH environment variable and sets the prompt to signify that you are
in a Python virtual environment. Makes use of the command line switches as
well as the `pyvenv.cfg` file values present in the virtual environment.

.Parameter VenvDir
Path to the directory that contains the virtual environment to activate. The
default value for this is the parent of the directory that the Activate.ps1
script is located within.

.Parameter Prompt
The prompt prefix to display when this virtual environment is activated. By
default, this prompt is the name of the virtual environment folder (VenvDir)
surrounded by parentheses and followed by a single space (ie. '(.venv) ').

.Example
Activate.ps1
Activates the Python virtual environment that contains the Activate.ps1 script.

.Example
Activate.ps1 -Verbose
Activates the Python virtual environment that contains the Activate.ps1 script,
and shows extra information about the activation as it executes.

.Example
Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
Activates the Python virtual environment located in the specified location.

.Example
Activate.ps1 -Prompt "MyPython"
Activates the Python virtual environment that contains the Activate.ps1 script,
and prefixes the current prompt with the specified string (surrounded in
parentheses) while the virtual environment is active.

.Notes
On Windows, it may be required to enable this Activate.ps1 script by setting the
execution policy for the user. You can do this by issuing the following PowerShell
command:

PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser

For more information on Execution Policies:
https://go.microsoft.com/fwlink/?LinkID=135170

#>
Param(
    [Parameter(Mandatory = $false)]
    [String]
    $VenvDir,
    [Parameter(Mandatory = $false)]
    [String]
    $Prompt
)

<# Function declarations --------------------------------------------------- #>

<#
.Synopsis
Remove all shell session elements added by the Activate script, including the
addition of the virtual environment's Python executable from the beginning of
the PATH variable.

.Parameter NonDestructive
If present, do not remove this function from the global namespace for the
session.

#>
function global:deactivate ([switch]$NonDestructive) {
    # Revert to original values

    # The prior prompt:
    if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
        Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
        Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
    }

    # The prior PYTHONHOME:
    if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
        Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
        Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
    }

    # The prior PATH:
    if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
        Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
        Remove-Item -Path Env:_OLD_VIRTUAL_PATH
    }

    # Just remove the VIRTUAL_ENV altogether:
    if (Test-Path -Path Env:VIRTUAL_ENV) {
        Remove-Item -Path env:VIRTUAL_ENV
    }

    # Just remove VIRTUAL_ENV_PROMPT altogether.
    if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
        Remove-Item -Path env:VIRTUAL_ENV_PROMPT
    }

    # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
    if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
        Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
    }

    # Leave deactivate function in the global namespace if requested:
    if (-not $NonDestructive) {
        Remove-Item -Path function:deactivate
    }
}

<#
.Description
Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
given folder, and returns them in a map.

For each line in the pyvenv.cfg file, if that line can be parsed into exactly
two strings separated by `=` (with any amount of whitespace surrounding the =)
then it is considered a `key = value` line. The left hand string is the key,
the right hand is the value.

If the value starts with a `'` or a `"` then the first and last character is
stripped from the value before being captured.

.Parameter ConfigDir
Path to the directory that contains the `pyvenv.cfg` file.
#>
function Get-PyVenvConfig(
    [String]
    $ConfigDir
) {
    Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"

    # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
    $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue

    # An empty map will be returned if no config file is found.
    $pyvenvConfig = @{ }

    if ($pyvenvConfigPath) {

        Write-Verbose "File exists, parse `key = value` lines"
        $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath

        $pyvenvConfigContent | ForEach-Object {
            $keyval = $PSItem -split "\s*=\s*", 2
            if ($keyval[0] -and $keyval[1]) {
                $val = $keyval[1]

                # Remove extraneous quotations around a string value.
                if ("'""".Contains($val.Substring(0, 1))) {
                    $val = $val.Substring(1, $val.Length - 2)
                }

                $pyvenvConfig[$keyval[0]] = $val
                Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
            }
        }
    }
    return $pyvenvConfig
}


<# Begin Activate script --------------------------------------------------- #>

# Determine the containing directory of this script
$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
$VenvExecDir = Get-Item -Path $VenvExecPath

Write-Verbose "Activation script is located in path: '$VenvExecPath'"
Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"

# Set values required in priority: CmdLine, ConfigFile, Default
# First, get the location of the virtual environment, it might not be
# VenvExecDir if specified on the command line.
if ($VenvDir) {
    Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
}
else {
    Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
    $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
    Write-Verbose "VenvDir=$VenvDir"
}

# Next, read the `pyvenv.cfg` file to determine any required value such
# as `prompt`.
$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir

# Next, set the prompt from the command line, or the config file, or
# just use the name of the virtual environment folder.
if ($Prompt) {
    Write-Verbose "Prompt specified as argument, using '$Prompt'"
}
else {
    Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
    if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
        Write-Verbose "  Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
        $Prompt = $pyvenvCfg['prompt'];
    }
    else {
        Write-Verbose "  Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
        Write-Verbose "  Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
        $Prompt = Split-Path -Path $venvDir -Leaf
    }
}

Write-Verbose "Prompt = '$Prompt'"
Write-Verbose "VenvDir='$VenvDir'"

# Deactivate any currently active virtual environment, but leave the
# deactivate function in place.
deactivate -nondestructive

# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
# that there is an activated venv.
$env:VIRTUAL_ENV = $VenvDir

if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {

    Write-Verbose "Setting prompt to '$Prompt'"

    # Set the prompt to include the env name
    # Make sure _OLD_VIRTUAL_PROMPT is global
    function global:_OLD_VIRTUAL_PROMPT { "" }
    Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
    New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt

    function global:prompt {
        Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
        _OLD_VIRTUAL_PROMPT
    }
    $env:VIRTUAL_ENV_PROMPT = $Prompt
}

# Clear PYTHONHOME
if (Test-Path -Path Env:PYTHONHOME) {
    Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
    Remove-Item -Path Env:PYTHONHOME
}

# Add the venv to the PATH
Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"
ACE_plus/flashenv/bin/activate
ADDED
@@ -0,0 +1,69 @@
# This file must be used with "source bin/activate" *from bash*
# you cannot run it directly

deactivate () {
    # reset old environment variables
    if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
        PATH="${_OLD_VIRTUAL_PATH:-}"
        export PATH
        unset _OLD_VIRTUAL_PATH
    fi
    if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
        PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
        export PYTHONHOME
        unset _OLD_VIRTUAL_PYTHONHOME
    fi

    # This should detect bash and zsh, which have a hash command that must
    # be called to get it to forget past commands. Without forgetting
    # past commands the $PATH changes we made may not be respected
    if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
        hash -r 2> /dev/null
    fi

    if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
        PS1="${_OLD_VIRTUAL_PS1:-}"
        export PS1
        unset _OLD_VIRTUAL_PS1
    fi

    unset VIRTUAL_ENV
    unset VIRTUAL_ENV_PROMPT
    if [ ! "${1:-}" = "nondestructive" ] ; then
        # Self destruct!
        unset -f deactivate
    fi
}

# unset irrelevant variables
deactivate nondestructive

VIRTUAL_ENV=/home/Ubuntu/Downloads/Unmodel/ACE_plus/flashenv
export VIRTUAL_ENV

_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/"bin":$PATH"
export PATH

# unset PYTHONHOME if set
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
# could use `if (set -u; : $PYTHONHOME) ;` in bash
if [ -n "${PYTHONHOME:-}" ] ; then
    _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
    unset PYTHONHOME
fi

if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
    _OLD_VIRTUAL_PS1="${PS1:-}"
    PS1='(flashenv) '"${PS1:-}"
    export PS1
    VIRTUAL_ENV_PROMPT='(flashenv) '
    export VIRTUAL_ENV_PROMPT
fi

# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
    hash -r 2> /dev/null
fi
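Unlike Activate.ps1, which derives the environment location from its own path, this script (and the csh/fish variants below) hard-codes VIRTUAL_ENV=/home/Ubuntu/Downloads/Unmodel/ACE_plus/flashenv, so the checked-in environment only activates correctly at that exact absolute path; cloning the repository elsewhere breaks activation.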
ACE_plus/flashenv/bin/activate.csh
ADDED
@@ -0,0 +1,26 @@
# This file must be used with "source bin/activate.csh" *from csh*.
# You cannot run it directly.
# Created by Davide Di Blasi <davidedb@gmail.com>.
# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>

alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate'

# Unset irrelevant variables.
deactivate nondestructive

setenv VIRTUAL_ENV /home/Ubuntu/Downloads/Unmodel/ACE_plus/flashenv

set _OLD_VIRTUAL_PATH="$PATH"
setenv PATH "$VIRTUAL_ENV/"bin":$PATH"


set _OLD_VIRTUAL_PROMPT="$prompt"

if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
    set prompt = '(flashenv) '"$prompt"
    setenv VIRTUAL_ENV_PROMPT '(flashenv) '
endif

alias pydoc python -m pydoc

rehash
ACE_plus/flashenv/bin/activate.fish
ADDED
@@ -0,0 +1,69 @@
# This file must be used with "source <venv>/bin/activate.fish" *from fish*
# (https://fishshell.com/); you cannot run it directly.

function deactivate -d "Exit virtual environment and return to normal shell environment"
    # reset old environment variables
    if test -n "$_OLD_VIRTUAL_PATH"
        set -gx PATH $_OLD_VIRTUAL_PATH
        set -e _OLD_VIRTUAL_PATH
    end
    if test -n "$_OLD_VIRTUAL_PYTHONHOME"
        set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
        set -e _OLD_VIRTUAL_PYTHONHOME
    end

    if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
        set -e _OLD_FISH_PROMPT_OVERRIDE
        # prevents error when using nested fish instances (Issue #93858)
        if functions -q _old_fish_prompt
            functions -e fish_prompt
            functions -c _old_fish_prompt fish_prompt
            functions -e _old_fish_prompt
        end
    end

    set -e VIRTUAL_ENV
    set -e VIRTUAL_ENV_PROMPT
    if test "$argv[1]" != "nondestructive"
        # Self-destruct!
        functions -e deactivate
    end
end

# Unset irrelevant variables.
deactivate nondestructive

set -gx VIRTUAL_ENV /home/Ubuntu/Downloads/Unmodel/ACE_plus/flashenv

set -gx _OLD_VIRTUAL_PATH $PATH
set -gx PATH "$VIRTUAL_ENV/"bin $PATH

# Unset PYTHONHOME if set.
if set -q PYTHONHOME
    set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
    set -e PYTHONHOME
end

if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
    # fish uses a function instead of an env var to generate the prompt.

    # Save the current fish_prompt function as the function _old_fish_prompt.
    functions -c fish_prompt _old_fish_prompt

    # With the original prompt function renamed, we can override with our own.
    function fish_prompt
        # Save the return status of the last command.
        set -l old_status $status

        # Output the venv prompt; color taken from the blue of the Python logo.
        printf "%s%s%s" (set_color 4B8BBE) '(flashenv) ' (set_color normal)

        # Restore the return status of the previous command.
        echo "exit $old_status" | .
        # Output the original/"old" prompt.
        _old_fish_prompt
    end

    set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
    set -gx VIRTUAL_ENV_PROMPT '(flashenv) '
end
ACE_plus/flashenv/bin/pip
ADDED
@@ -0,0 +1,8 @@
#!/home/Ubuntu/Downloads/Unmodel/ACE_plus/flashenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
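This wrapper is the standard pip console-script shim: the shebang pins the absolute path of the interpreter baked in when the venv was created, so the pip/pip3/pip3.10 entry points stop working if the flashenv tree is moved or the base interpreter is removed.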
ACE_plus/flashenv/bin/pip3
ADDED
@@ -0,0 +1,8 @@
#!/home/Ubuntu/Downloads/Unmodel/ACE_plus/flashenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
ACE_plus/flashenv/bin/pip3.10
ADDED
@@ -0,0 +1,8 @@
#!/home/Ubuntu/Downloads/Unmodel/ACE_plus/flashenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
ACE_plus/flashenv/bin/python
ADDED
Binary file (17.7 kB).

ACE_plus/flashenv/bin/python3
ADDED
Binary file (17.7 kB).

ACE_plus/flashenv/bin/python3.10
ADDED
Binary file (17.7 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/pkg_resources/__init__.py
ADDED
The diff for this file is too large to render.

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-310.pyc
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d989275a5e9b8d252af087830d5418e0ef537178965262190e7d6be772601c45
size 113951

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (6.12 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/__main__.cpython-310.pyc
ADDED
Binary file (7.19 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_cell_widths.cpython-310.pyc
ADDED
Binary file (7.88 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_emoji_codes.cpython-310.pyc
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7a112fd0d52e2fc5e903d0546415911f83afb711cc1ac9379ef7ca5326c431c5
size 360065

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_emoji_replace.cpython-310.pyc
ADDED
Binary file (1.21 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_export_format.cpython-310.pyc
ADDED
Binary file (2.34 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_extension.cpython-310.pyc
ADDED
Binary file (506 Bytes).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_fileno.cpython-310.pyc
ADDED
Binary file (799 Bytes).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_inspect.cpython-310.pyc
ADDED
Binary file (8.63 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_log_render.cpython-310.pyc
ADDED
Binary file (2.65 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_loop.cpython-310.pyc
ADDED
Binary file (1.3 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_null_file.cpython-310.pyc
ADDED
Binary file (3.25 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_palettes.cpython-310.pyc
ADDED
Binary file (5.11 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_pick.cpython-310.pyc
ADDED
Binary file (651 Bytes).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_ratio.cpython-310.pyc
ADDED
Binary file (5.17 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_spinners.cpython-310.pyc
ADDED
Binary file (12.3 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_stack.cpython-310.pyc
ADDED
Binary file (849 Bytes).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_timer.cpython-310.pyc
ADDED
Binary file (698 Bytes).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_win32_console.cpython-310.pyc
ADDED
Binary file (19 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_windows.cpython-310.pyc
ADDED
Binary file (1.8 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_windows_renderer.cpython-310.pyc
ADDED
Binary file (2.05 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/_wrap.cpython-310.pyc
ADDED
Binary file (2.48 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/abc.cpython-310.pyc
ADDED
Binary file (1.33 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/align.cpython-310.pyc
ADDED
Binary file (8.06 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/ansi.cpython-310.pyc
ADDED
Binary file (6.01 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/bar.cpython-310.pyc
ADDED
Binary file (2.99 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/box.cpython-310.pyc
ADDED
Binary file (8.41 kB).

ACE_plus/flashenv/lib/python3.10/site-packages/pip/_vendor/rich/__pycache__/cells.cpython-310.pyc
ADDED
Binary file (4.29 kB).
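The two .cpython-310.pyc entries above that show a three-line diff are tracked by Git LFS: the repository stores only the pointer stub (spec version, sha256 OID, and byte size) while the binary payload lives in LFS storage; the remaining .pyc files were uploaded as plain binaries.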