nonomm committed on
Commit
4ff3728
·
0 Parent(s):

Initial commit.

Browse files
.gitattributes ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
2
+ *.pt filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ */samples
16/.job_config.json ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "job": "extension",
3
+ "config": {
4
+ "name": "cl4ud1a",
5
+ "process": [
6
+ {
7
+ "type": "diffusion_trainer",
8
+ "training_folder": "C:\\Users\\ajmm2\\ostris\\AI-Toolkit\\output",
9
+ "sqlite_db_path": "C:\\Users\\ajmm2\\ostris\\AI-Toolkit\\aitk_db.db",
10
+ "device": "cuda",
11
+ "trigger_word": null,
12
+ "performance_log_every": 10,
13
+ "network": {
14
+ "type": "lora",
15
+ "linear": 16,
16
+ "linear_alpha": 16,
17
+ "conv": 16,
18
+ "conv_alpha": 16,
19
+ "lokr_full_rank": true,
20
+ "lokr_factor": -1,
21
+ "network_kwargs": {
22
+ "ignore_if_contains": []
23
+ }
24
+ },
25
+ "save": {
26
+ "dtype": "bf16",
27
+ "save_every": 250,
28
+ "max_step_saves_to_keep": 8,
29
+ "save_format": "diffusers",
30
+ "push_to_hub": false
31
+ },
32
+ "datasets": [
33
+ {
34
+ "folder_path": "C:\\Users\\ajmm2\\ostris\\AI-Toolkit\\datasets/cl4ud1a",
35
+ "mask_path": null,
36
+ "mask_min_value": 0.1,
37
+ "default_caption": "",
38
+ "caption_ext": "txt",
39
+ "caption_dropout_rate": 0.05,
40
+ "cache_latents_to_disk": true,
41
+ "is_reg": false,
42
+ "network_weight": 1,
43
+ "resolution": [
44
+ 768,
45
+ 1024
46
+ ],
47
+ "controls": [],
48
+ "shrink_video_to_frames": true,
49
+ "num_frames": 1,
50
+ "do_i2v": true,
51
+ "flip_x": true,
52
+ "flip_y": false
53
+ }
54
+ ],
55
+ "train": {
56
+ "batch_size": 1,
57
+ "bypass_guidance_embedding": false,
58
+ "steps": 3000,
59
+ "gradient_accumulation": 1,
60
+ "train_unet": true,
61
+ "train_text_encoder": false,
62
+ "gradient_checkpointing": true,
63
+ "noise_scheduler": "flowmatch",
64
+ "optimizer": "adamw8bit",
65
+ "timestep_type": "sigmoid",
66
+ "content_or_style": "balanced",
67
+ "optimizer_params": {
68
+ "weight_decay": 0.0001
69
+ },
70
+ "unload_text_encoder": false,
71
+ "cache_text_embeddings": true,
72
+ "lr": 0.0003,
73
+ "ema_config": {
74
+ "use_ema": false,
75
+ "ema_decay": 0.99
76
+ },
77
+ "skip_first_sample": false,
78
+ "force_first_sample": true,
79
+ "disable_sampling": false,
80
+ "dtype": "bf16",
81
+ "diff_output_preservation": false,
82
+ "diff_output_preservation_multiplier": 1,
83
+ "diff_output_preservation_class": "person",
84
+ "switch_boundary_every": 1,
85
+ "loss_type": "mae",
86
+ "do_differential_guidance": true,
87
+ "differential_guidance_scale": 3
88
+ },
89
+ "model": {
90
+ "name_or_path": "Tongyi-MAI/Z-Image-Turbo",
91
+ "quantize": true,
92
+ "qtype": "qfloat8",
93
+ "quantize_te": true,
94
+ "qtype_te": "qfloat8",
95
+ "arch": "zimage:turbo",
96
+ "low_vram": true,
97
+ "model_kwargs": {},
98
+ "layer_offloading": false,
99
+ "layer_offloading_text_encoder_percent": 1,
100
+ "layer_offloading_transformer_percent": 1,
101
+ "assistant_lora_path": "ostris/zimage_turbo_training_adapter/zimage_turbo_training_adapter_v1.safetensors"
102
+ },
103
+ "sample": {
104
+ "sampler": "flowmatch",
105
+ "sample_every": 250,
106
+ "width": 1024,
107
+ "height": 1024,
108
+ "samples": [
109
+ {
110
+ "prompt": "cl4ud1a woman, close-up portrait, soft natural lighting, neutral background"
111
+ },
112
+ {
113
+ "prompt": "cl4ud1a woman, she has blonde hair, looking directly at the camera, shallow depth of field"
114
+ },
115
+ {
116
+ "prompt": "cl4ud1a woman, medium shot, outdoors soft sunlight, natural pose"
117
+ },
118
+ {
119
+ "prompt": "cl4ud1a woman standing next to another woman, medium shot, both looking at the camera"
120
+ },
121
+ {
122
+ "prompt": "cl4ud1a woman full body, standing in a park, natural lighting, realistic proportions"
123
+ }
124
+ ],
125
+ "neg": "",
126
+ "seed": 42,
127
+ "walk_seed": true,
128
+ "guidance_scale": 1,
129
+ "sample_steps": 4,
130
+ "num_frames": 1,
131
+ "fps": 1
132
+ }
133
+ }
134
+ ]
135
+ },
136
+ "meta": {
137
+ "name": "[name]",
138
+ "version": "1.0"
139
+ }
140
+ }
16/README.md ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # cl4ud1a — LoRA adapter (Z-Image Turbo, rank=16)
2
+
3
+ This folder holds the training experiment named `cl4ud1a` — a LoRA adapter for Tongyi-MAI/Z-Image-Turbo (Z-Image Turbo) tuned with rank 16 parameters.
4
+
5
+ Key facts
6
+ - Base model: Tongyi-MAI/Z-Image-Turbo (arch: zimage:turbo)
7
+ - LoRA type: UNet LoRA (linear=16, conv=16, alpha=16)
8
+ - Training steps: 3000 (checkpoints saved every 250 steps)
9
+ - Save format: Diffusers safetensors (dtype: bf16)
10
+ - Training device: cuda
11
+ - Quantization: qfloat8 applied to model and text encoder
12
+
13
+ Artifacts in this folder
14
+ - `cl4ud1a.safetensors` — final LoRA/adapted weights (merged in training pipeline)
15
+ - `cl4ud1a_00000XXXXX.safetensors` — saved checkpoints (step increments)
16
+ - `optimizer.pt` — optimizer state (checkpoint)
17
+ - `config.yaml` — original run configuration used for training
18
+ - `log.txt` — raw training log (progress, warnings, reproducibility notes)
19
+ - `samples/` — generated sample images at a few checkpoints
20
+
21
+ Training notes & observations
22
+ - Training used a small dataset (~14 images, augmented to 28 via flips) at mixed resolutions (768–1024). Latents and text embeddings were cached for speed.
23
+ - A PIL-based EXIF parsing error appeared for one PNG during preprocessing; dataset sanitation is recommended before reproduction (see log snippet).
24
+ - Assistant LoRA adapter was loaded/merged during training — see `config.yaml` for assistant adapter path.
25
+
26
+ How to reproduce (short)
27
+ 1. Ensure you have the same base model (Tongyi-MAI/Z-Image-Turbo) accessible.
28
+ 2. Recreate the environment with GPU + CUDA and BF16 support.
29
+ 3. Use `config.yaml` to re-run the trainer used by the author (dataset paths will need adjustment).
30
+
31
+ Usage example (consumer)
32
+ - To apply the LoRA at inference time, use your Z-Image-Turbo-compatible pipeline loader and merge or inject the safetensors file into the UNet weights (example depends on your runner/adapter).
33
+
34
+ If you plan to upload this experiment to Hugging Face: include `cl4ud1a.safetensors`, `config.yaml`, `log.txt` and a short model card describing license and data provenance.
16/cl4ud1a.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:38a0dda00e4d4e393735513148ac7e41ebbffea36d654cd4217b4c568720f7c8
3
+ size 85094792
16/cl4ud1a_000001000.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5326d250f623265e5eea4cbd31e88851e4485d13973ffb412226727d9928948f
3
+ size 85094792
16/cl4ud1a_000001250.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:efb12810b068a064340815f2e6f80e3813045bcd3507ed1f50bbb2b92360dad3
3
+ size 85094792
16/cl4ud1a_000001500.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af378fe5ca27d2e9956a42b974d82cd780b69dc9d78acf75ad7b6fb1712d4edc
3
+ size 85094792
16/cl4ud1a_000001750.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e9040947412f239ee0d4283acf2610226de92b6ad7b5b6fb67471fd0a0eb8259
3
+ size 85094792
16/cl4ud1a_000002000.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:74ccccc8b895bab3704745610ce5024ddd87025f9a77105d2d0504c7c9bb16f0
3
+ size 85094792
16/cl4ud1a_000002250.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:37479d38190b7d89efeee356ab162d8d015bbe05949a0f96b3fb6904d800ad52
3
+ size 85094792
16/cl4ud1a_000002500.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9eee3f551ec7d38f6b6a57179bd479836ab80da8a8fea185e78fba23da17e977
3
+ size 85094792
16/cl4ud1a_000002750.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9644592c080752f478209b384b37661ba5a35c7ed87c17f2b50398c29510a226
3
+ size 85094792
16/config.yaml ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ job: extension
2
+ config:
3
+ name: cl4ud1a
4
+ process:
5
+ - type: diffusion_trainer
6
+ training_folder: C:\Users\ajmm2\ostris\AI-Toolkit\output
7
+ sqlite_db_path: C:\Users\ajmm2\ostris\AI-Toolkit\aitk_db.db
8
+ device: cuda
9
+ trigger_word: null
10
+ performance_log_every: 10
11
+ network:
12
+ type: lora
13
+ linear: 16
14
+ linear_alpha: 16
15
+ conv: 16
16
+ conv_alpha: 16
17
+ lokr_full_rank: true
18
+ lokr_factor: -1
19
+ network_kwargs:
20
+ ignore_if_contains: []
21
+ save:
22
+ dtype: bf16
23
+ save_every: 250
24
+ max_step_saves_to_keep: 8
25
+ save_format: diffusers
26
+ push_to_hub: false
27
+ datasets:
28
+ - folder_path: C:\Users\ajmm2\ostris\AI-Toolkit\datasets/cl4ud1a
29
+ mask_path: null
30
+ mask_min_value: 0.1
31
+ default_caption: ''
32
+ caption_ext: txt
33
+ caption_dropout_rate: 0.05
34
+ cache_latents_to_disk: true
35
+ is_reg: false
36
+ network_weight: 1
37
+ resolution:
38
+ - 768
39
+ - 1024
40
+ controls: []
41
+ shrink_video_to_frames: true
42
+ num_frames: 1
43
+ do_i2v: true
44
+ flip_x: true
45
+ flip_y: false
46
+ train:
47
+ batch_size: 1
48
+ bypass_guidance_embedding: false
49
+ steps: 3000
50
+ gradient_accumulation: 1
51
+ train_unet: true
52
+ train_text_encoder: false
53
+ gradient_checkpointing: true
54
+ noise_scheduler: flowmatch
55
+ optimizer: adamw8bit
56
+ timestep_type: sigmoid
57
+ content_or_style: balanced
58
+ optimizer_params:
59
+ weight_decay: 0.0001
60
+ unload_text_encoder: false
61
+ cache_text_embeddings: true
62
+ lr: 0.0003
63
+ ema_config:
64
+ use_ema: false
65
+ ema_decay: 0.99
66
+ skip_first_sample: false
67
+ force_first_sample: true
68
+ disable_sampling: false
69
+ dtype: bf16
70
+ diff_output_preservation: false
71
+ diff_output_preservation_multiplier: 1
72
+ diff_output_preservation_class: person
73
+ switch_boundary_every: 1
74
+ loss_type: mae
75
+ do_differential_guidance: true
76
+ differential_guidance_scale: 3
77
+ model:
78
+ name_or_path: Tongyi-MAI/Z-Image-Turbo
79
+ quantize: true
80
+ qtype: qfloat8
81
+ quantize_te: true
82
+ qtype_te: qfloat8
83
+ arch: zimage:turbo
84
+ low_vram: true
85
+ model_kwargs: {}
86
+ layer_offloading: false
87
+ layer_offloading_text_encoder_percent: 1
88
+ layer_offloading_transformer_percent: 1
89
+ assistant_lora_path: ostris/zimage_turbo_training_adapter/zimage_turbo_training_adapter_v1.safetensors
90
+ sample:
91
+ sampler: flowmatch
92
+ sample_every: 250
93
+ width: 1024
94
+ height: 1024
95
+ samples:
96
+ - prompt: cl4ud1a woman, close-up portrait, soft natural lighting, neutral background
97
+ - prompt: cl4ud1a woman, she has blonde hair, looking directly at the camera,
98
+ shallow depth of field
99
+ - prompt: cl4ud1a woman, medium shot, outdoors soft sunlight, natural pose
100
+ - prompt: cl4ud1a woman standing next to another woman, medium shot, both looking
101
+ at the camera
102
+ - prompt: cl4ud1a woman full body, standing in a park, natural lighting, realistic
103
+ proportions
104
+ neg: ''
105
+ seed: 42
106
+ walk_seed: true
107
+ guidance_scale: 1
108
+ sample_steps: 4
109
+ num_frames: 1
110
+ fps: 1
111
+ meta:
112
+ name: cl4ud1a
113
+ version: '1.0'
16/log.txt ADDED
The diff for this file is too large to render. See raw diff
 
16/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2a8a4253a725f22d178e0c00aac4801e06957f61adffe25d5c8ebabb80504df6
3
+ size 86924069
AGENTS.md ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # AGENTS.md — maintenance & publishing guide
2
+
3
+ Purpose
4
+ - A short operating manual for maintainers and agents responsible for validating, publishing and reproducing experiments in this repo.
5
+
6
+ Primary responsibilities
7
+ - Verify artifacts are complete and named consistently.
8
+ - Confirm the run configuration, dataset provenance and licenses before publishing.
9
+ - Run a basic inference sanity check (generate sample images) before uploading.
10
+
11
+ Checklist prior to publishing to Hugging Face
12
+ 1. Files: ensure `*.safetensors` (final & checkpoints), `config.yaml`, `log.txt`, `optimizer.pt` (optional), and `samples/` are present and readable.
13
+ 2. Metadata: create or update a short model card (README or model card in HF) with dataset provenance, license and usage notes.
14
+ 3. Privacy: confirm no private personal data is included in the dataset or commit history.
15
+ 4. Reproducibility: verify that `config.yaml` matches the run that produced the artifacts and that sample generation runs successfully.
16
+
17
+ Quick publish steps (recommended)
18
+ 1. Inspect artifacts/size and confirm they match expectations.
19
+ 2. Generate validation samples (use the local Z-Image Turbo runner or a minimal script that loads the base model + LoRA and produces 2–5 images).
20
+ 3. Write or complete the model card: include base model, LoRA config (rank, layers), number of steps, dataset summary and license.
21
+ 4. Upload/commit to a HF model repository. Minimal files to include:
22
+ - `cl4ud1a.safetensors` (final adapter)
23
+ - `config.yaml` (run configuration)
24
+ - `log.txt` (training log or condensed training summary)
25
+ - `README.md` or `model_card.md` (short description & instructions)
26
+ - `samples/` (small set of generated images)
27
+
28
+ Publishing tips & small scripts
29
+ - When in doubt, run a short inference test using the same sampler/seed used for saved samples to confirm the LoRA applies and produces reasonable output.
30
+ - Use HF CLI or web UI for model uploads; prefer `safetensors` for environments that accept them.
31
+
32
+ Versioning / tagging
33
+ - Follow semantic incrementing when creating releases (e.g., v1.0 for the first publish). Keep a changelog entry when re-trained or restructured.
34
+
35
+ Automation & CI
36
+ - Add a lightweight validation workflow to run a short inference test (CPU/GPU optional) to ensure `cl4ud1a.safetensors` loads and generates output.
37
+
38
+ Notes for reviewers
39
+ - Check for dataset licensing issues and flagged content in the training set before accepting publication.
40
+ - Encourage authors to add a clear license and small sanitized dataset description for the model card.
README.md ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # zimage_lora — Z-Image Turbo LoRA experiments
2
+
3
+ This repository contains training artifacts and provenance for a LoRA adapter trained on top of Tongyi-MAI/Z-Image-Turbo (Z-Image Turbo). The main experiment in this repo is the `16/` folder, which holds a LoRA tuned with a base rank of 16 for linear and conv adapters.
4
+
5
+ Purpose
6
+ - Collect, preserve and document a training run so it can be uploaded to Hugging Face or reproduced locally.
7
+
8
+ Contents
9
+ - `16/` — experiment folder with model artifacts, training config and logs.
10
+
11
+ Quick summary
12
+ - Base model: Tongyi-MAI/Z-Image-Turbo (arch: zimage:turbo)
13
+ - LoRA type: UNet LoRA with rank 16 (linear and conv set to 16, alpha 16)
14
+ - Training steps: 3000 (checkpoint saves every 250 steps)
15
+ - Output format: diffusers safetensors
16
+
17
+ Usage notes
18
+ - Files in `16/` are training artifacts. To use the resulting LoRA, merge or load the safetensors using a compatible Z-Image-Turbo pipeline (Diffusers or a compatible loader).
19
+
20
+ For maintainers (see AGENTS.md) — how to review, test, and publish artifacts is documented in `AGENTS.md`.
21
+
22
+ If you need a short, experiment-focused README (example usage and provenance) look in `16/README.md`.
model_card.md ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # cl4ud1a — Z-Image Turbo LoRA (rank=16)
2
+
3
+ Short description
4
+ - cl4ud1a is a lightweight LoRA adapter trained on top of the Tongyi-MAI/Z-Image-Turbo (Z-Image Turbo) model to adapt U-Net weights for a small custom dataset. This adapter is saved in Diffusers-compatible safetensors format.
5
+
6
+ Model details
7
+ - Base model: Tongyi-MAI/Z-Image-Turbo (architecture: zimage:turbo)
8
+ - Adapter type: LoRA applied to U-Net (linear=16, conv=16, alpha=16)
9
+ - Training steps: 3000 (checkpoints saved every 250 steps)
10
+ - Precision & format: bf16 safetensors (Diffusers format)
11
+ - Quantization: qfloat8 used for model and text encoder during training
12
+
13
+ Dataset & provenance
14
+ - Small author dataset (about 14 original images, augmented to 28 via flips). Mixed resolutions: 768 and 1024.
15
+ - The dataset path used for training is local to the author; external users should verify dataset provenance and licensing before re-training or publishing. One preprocessing error was observed for a PNG; ensure dataset files are valid and sanitized before reuse.
16
+
17
+ Intended uses
18
+ - Fine-tuning / personalization: Merge or apply this LoRA to Z-Image-Turbo at inference time to bias generation toward the target style/content in the training set.
19
+ - Research / reproducibility: Use the included `config.yaml` and `log.txt` to reproduce or extend the experiment.
20
+
21
+ Limitations & risks
22
+ - Small dataset: Results can overfit and might not generalize. Expect style/topic biases from the training images.
23
+ - Sensitive data risk: Confirm the dataset contains no private personal data or copyrighted material you do not own the right to share.
24
+ - Safety: Do not use the adapter to generate disallowed content. Be aware of model misuse and legal/ethical constraints applying to generated images.
25
+
26
+ How to use (basic)
27
+ 1. Obtain Tongyi-MAI/Z-Image-Turbo model and a compatible loader or pipeline that accepts LoRA/adapters.
28
+ 2. Load or merge `cl4ud1a.safetensors` into the U-Net weights of Z-Image-Turbo. Tools/commands depend on the runner you use (Diffusers-based, adapter loader, or custom runner).
29
+ 3. Recreate sampling settings used for validation if you want the same results (see `16/config.yaml` and `16/samples/`).
30
+
31
+ Files included in repo
32
+ - `16/cl4ud1a.safetensors` — final adapter
33
+ - `16/cl4ud1a_00000*.safetensors` — checkpoint saves
34
+ - `16/config.yaml` — training configuration
35
+ - `16/log.txt` — training log
36
+ - `16/samples/` — validation sample images
37
+
38
+ Training notes
39
+ - Trained with batch size 1, learning rate 3e-4, differential guidance and optimizer `adamw8bit`.
40
+ - Text encoder training was disabled; UNet weights were updated.
41
+
42
+ Model card license
43
+ - No license is included with this model card. If you want to publish or redistribute the adapter or associated files, add an explicit license before uploading to model hubs or public distribution channels.
44
+
45
+ Contact / authorship
46
+ - See repository maintainer or `AGENTS.md` for publishing guidance and pre-upload checks.
47
+
48
+ Citation
49
+ - If you use this adapter in your research or public project, please cite the repository and the base model (Tongyi-MAI/Z-Image-Turbo) according to their respective citation guidelines.