danielmnd Phr00t commited on
Commit
6e7e43a
·
verified ·
0 Parent(s):

Duplicate from Phr00t/Qwen-Image-Edit-Rapid-AIO

Browse files

Co-authored-by: Phr00t <Phr00t@users.noreply.huggingface.co>

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +35 -0
  2. Qwen-Rapid-AIO-v1-8step.safetensors +3 -0
  3. Qwen-Rapid-AIO-v1.safetensors +3 -0
  4. Qwen-Rapid-AIO-v2.safetensors +3 -0
  5. Qwen-Rapid-AIO-v3.safetensors +3 -0
  6. Qwen-Rapid-AIO-v4.safetensors +3 -0
  7. Qwen-Rapid-AIO.json +539 -0
  8. README.md +70 -0
  9. fixed-textencode-node/.can_replace_comfyui_node_with_this.v2_is_best +0 -0
  10. fixed-textencode-node/README.md +12 -0
  11. fixed-textencode-node/nodes_qwen.py +118 -0
  12. fixed-textencode-node/nodes_qwen.v2.py +117 -0
  13. v10/Qwen-Rapid-AIO-NSFW-v10.2.safetensors +3 -0
  14. v10/Qwen-Rapid-AIO-NSFW-v10.4.safetensors +3 -0
  15. v10/Qwen-Rapid-AIO-NSFW-v10.safetensors +3 -0
  16. v10/Qwen-Rapid-AIO-SFW-v10.safetensors +3 -0
  17. v11/Qwen-Rapid-AIO-NSFW-v11.1.safetensors +3 -0
  18. v11/Qwen-Rapid-AIO-NSFW-v11.3.safetensors +3 -0
  19. v11/Qwen-Rapid-AIO-NSFW-v11.4.safetensors +3 -0
  20. v11/Qwen-Rapid-AIO-NSFW-v11.safetensors +3 -0
  21. v11/Qwen-Rapid-AIO-SFW-v11.safetensors +3 -0
  22. v12/Qwen-Rapid-AIO-NSFW-v12.safetensors +3 -0
  23. v12/Qwen-Rapid-AIO-SFW-v12.safetensors +3 -0
  24. v13/Qwen-Rapid-AIO-NSFW-v13.safetensors +3 -0
  25. v13/Qwen-Rapid-AIO-SFW-v13.safetensors +3 -0
  26. v14/Qwen-Rapid-AIO-NSFW-v14.1.safetensors +3 -0
  27. v14/Qwen-Rapid-AIO-NSFW-v14.safetensors +3 -0
  28. v14/Qwen-Rapid-AIO-SFW-v14.1.safetensors +3 -0
  29. v14/Qwen-Rapid-AIO-SFW-v14.safetensors +3 -0
  30. v15/.no_nsfw_version_for_this +0 -0
  31. v15/Qwen-Rapid-AIO-SFW-v15.safetensors +3 -0
  32. v16/Qwen-Rapid-AIO-NSFW-v16.safetensors +3 -0
  33. v16/Qwen-Rapid-AIO-SFW-v16.safetensors +3 -0
  34. v17/Qwen-Rapid-AIO-NSFW-v17.safetensors +3 -0
  35. v17/Qwen-Rapid-AIO-SFW-v17.safetensors +3 -0
  36. v18/Qwen-Rapid-AIO-NSFW-v18.1.safetensors +3 -0
  37. v18/Qwen-Rapid-AIO-NSFW-v18.safetensors +3 -0
  38. v18/Qwen-Rapid-AIO-SFW-v18.safetensors +3 -0
  39. v19/Qwen-Rapid-AIO-NSFW-v19.safetensors +3 -0
  40. v19/Qwen-Rapid-AIO-SFW-v19.safetensors +3 -0
  41. v20/Qwen-Rapid-AIO-NSFW-v20.safetensors +3 -0
  42. v20/Qwen-Rapid-AIO-SFW-v20.safetensors +3 -0
  43. v21/Qwen-Rapid-AIO-NSFW-v21.safetensors +3 -0
  44. v21/Qwen-Rapid-AIO-SFW-v21.safetensors +3 -0
  45. v22/Qwen-Rapid-AIO-NSFW-v22.safetensors +3 -0
  46. v22/Qwen-Rapid-AIO-SFW-v22.safetensors +3 -0
  47. v23/Qwen-Rapid-AIO-NSFW-v23.safetensors +3 -0
  48. v23/Qwen-Rapid-AIO-SFW-v23.safetensors +3 -0
  49. v5/Qwen-Rapid-AIO-NSFW-v5.1.safetensors +3 -0
  50. v5/Qwen-Rapid-AIO-NSFW-v5.2.safetensors +3 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
Qwen-Rapid-AIO-v1-8step.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c98d037873f21bea336fa49ed812015508a23848535ea3ece38393d6418a44c9
3
+ size 28978617994
Qwen-Rapid-AIO-v1.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:826b842be7091dbe013d8784b70c02df2768e16fa9328f3f445dd7638510752c
3
+ size 28978618010
Qwen-Rapid-AIO-v2.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1c38a63e347c0170865cea091dd0d1c07e857a19959b26414b92617bd7d4254e
3
+ size 28978618770
Qwen-Rapid-AIO-v3.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:24d0780a13490b523b1c06fbbdd67ad4aabbda069e50a887558c5766e7a0aa76
3
+ size 28978618442
Qwen-Rapid-AIO-v4.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fae21038986aabdd879cd7754a77abf56d3c200147362d1ebaa92c1662dafda9
3
+ size 28978619370
Qwen-Rapid-AIO.json ADDED
@@ -0,0 +1,539 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "id": "b058853e-412f-4f02-95e2-abe2a40ebc2e",
3
+ "revision": 0,
4
+ "last_node_id": 9,
5
+ "last_link_id": 18,
6
+ "nodes": [
7
+ {
8
+ "id": 1,
9
+ "type": "CheckpointLoaderSimple",
10
+ "pos": [
11
+ -239.19235229492188,
12
+ 25.397891998291016
13
+ ],
14
+ "size": [
15
+ 369.6529235839844,
16
+ 100.6932373046875
17
+ ],
18
+ "flags": {},
19
+ "order": 0,
20
+ "mode": 0,
21
+ "inputs": [],
22
+ "outputs": [
23
+ {
24
+ "name": "MODEL",
25
+ "type": "MODEL",
26
+ "links": [
27
+ 1
28
+ ]
29
+ },
30
+ {
31
+ "name": "CLIP",
32
+ "type": "CLIP",
33
+ "links": [
34
+ 2,
35
+ 7
36
+ ]
37
+ },
38
+ {
39
+ "name": "VAE",
40
+ "type": "VAE",
41
+ "links": [
42
+ 6,
43
+ 8,
44
+ 9
45
+ ]
46
+ }
47
+ ],
48
+ "properties": {
49
+ "Node name for S&R": "CheckpointLoaderSimple"
50
+ },
51
+ "widgets_values": [
52
+ "Qwen\\Qwen-Rapid-AIO-v1.safetensors"
53
+ ]
54
+ },
55
+ {
56
+ "id": 6,
57
+ "type": "PreviewImage",
58
+ "pos": [
59
+ 166.48406982421875,
60
+ 439.3702392578125
61
+ ],
62
+ "size": [
63
+ 266.2825927734375,
64
+ 346.3945617675781
65
+ ],
66
+ "flags": {},
67
+ "order": 8,
68
+ "mode": 0,
69
+ "inputs": [
70
+ {
71
+ "name": "images",
72
+ "type": "IMAGE",
73
+ "link": 10
74
+ }
75
+ ],
76
+ "outputs": [],
77
+ "properties": {
78
+ "Node name for S&R": "PreviewImage"
79
+ },
80
+ "widgets_values": []
81
+ },
82
+ {
83
+ "id": 5,
84
+ "type": "VAEDecode",
85
+ "pos": [
86
+ 229.74990844726562,
87
+ 338.609375
88
+ ],
89
+ "size": [
90
+ 140,
91
+ 46
92
+ ],
93
+ "flags": {},
94
+ "order": 7,
95
+ "mode": 0,
96
+ "inputs": [
97
+ {
98
+ "name": "samples",
99
+ "type": "LATENT",
100
+ "link": 5
101
+ },
102
+ {
103
+ "name": "vae",
104
+ "type": "VAE",
105
+ "link": 6
106
+ }
107
+ ],
108
+ "outputs": [
109
+ {
110
+ "name": "IMAGE",
111
+ "type": "IMAGE",
112
+ "links": [
113
+ 10
114
+ ]
115
+ }
116
+ ],
117
+ "properties": {
118
+ "Node name for S&R": "VAEDecode"
119
+ },
120
+ "widgets_values": []
121
+ },
122
+ {
123
+ "id": 9,
124
+ "type": "EmptyLatentImage",
125
+ "pos": [
126
+ -189.9932403564453,
127
+ 660.2916870117188
128
+ ],
129
+ "size": [
130
+ 270,
131
+ 106
132
+ ],
133
+ "flags": {},
134
+ "order": 1,
135
+ "mode": 0,
136
+ "inputs": [],
137
+ "outputs": [
138
+ {
139
+ "name": "LATENT",
140
+ "type": "LATENT",
141
+ "links": [
142
+ 13
143
+ ]
144
+ }
145
+ ],
146
+ "title": "Final Image Size",
147
+ "properties": {
148
+ "Node name for S&R": "EmptyLatentImage"
149
+ },
150
+ "widgets_values": [
151
+ 768,
152
+ 768,
153
+ 1
154
+ ]
155
+ },
156
+ {
157
+ "id": 2,
158
+ "type": "KSampler",
159
+ "pos": [
160
+ 165.2884979248047,
161
+ 23.94108009338379
162
+ ],
163
+ "size": [
164
+ 270,
165
+ 262
166
+ ],
167
+ "flags": {},
168
+ "order": 6,
169
+ "mode": 0,
170
+ "inputs": [
171
+ {
172
+ "name": "model",
173
+ "type": "MODEL",
174
+ "link": 1
175
+ },
176
+ {
177
+ "name": "positive",
178
+ "type": "CONDITIONING",
179
+ "link": 3
180
+ },
181
+ {
182
+ "name": "negative",
183
+ "type": "CONDITIONING",
184
+ "link": 4
185
+ },
186
+ {
187
+ "name": "latent_image",
188
+ "type": "LATENT",
189
+ "link": 13
190
+ }
191
+ ],
192
+ "outputs": [
193
+ {
194
+ "name": "LATENT",
195
+ "type": "LATENT",
196
+ "links": [
197
+ 5
198
+ ]
199
+ }
200
+ ],
201
+ "properties": {
202
+ "Node name for S&R": "KSampler"
203
+ },
204
+ "widgets_values": [
205
+ 65454653,
206
+ "fixed",
207
+ 4,
208
+ 1,
209
+ "sa_solver",
210
+ "beta",
211
+ 1
212
+ ]
213
+ },
214
+ {
215
+ "id": 4,
216
+ "type": "TextEncodeQwenImageEditPlus",
217
+ "pos": [
218
+ -259.2781677246094,
219
+ 417.37896728515625
220
+ ],
221
+ "size": [
222
+ 407.40655517578125,
223
+ 195.96002197265625
224
+ ],
225
+ "flags": {},
226
+ "order": 4,
227
+ "mode": 0,
228
+ "inputs": [
229
+ {
230
+ "name": "clip",
231
+ "type": "CLIP",
232
+ "link": 7
233
+ },
234
+ {
235
+ "name": "vae",
236
+ "shape": 7,
237
+ "type": "VAE",
238
+ "link": 9
239
+ },
240
+ {
241
+ "name": "image1",
242
+ "shape": 7,
243
+ "type": "IMAGE",
244
+ "link": null
245
+ },
246
+ {
247
+ "name": "image2",
248
+ "shape": 7,
249
+ "type": "IMAGE",
250
+ "link": null
251
+ },
252
+ {
253
+ "name": "image3",
254
+ "shape": 7,
255
+ "type": "IMAGE",
256
+ "link": null
257
+ }
258
+ ],
259
+ "outputs": [
260
+ {
261
+ "name": "CONDITIONING",
262
+ "type": "CONDITIONING",
263
+ "links": [
264
+ 4
265
+ ]
266
+ }
267
+ ],
268
+ "title": "TextEncodeQwenImageEditPlus Negative (leave blank)",
269
+ "properties": {
270
+ "Node name for S&R": "TextEncodeQwenImageEditPlus"
271
+ },
272
+ "widgets_values": [
273
+ ""
274
+ ]
275
+ },
276
+ {
277
+ "id": 3,
278
+ "type": "TextEncodeQwenImageEditPlus",
279
+ "pos": [
280
+ -230.1153106689453,
281
+ 170.6533660888672
282
+ ],
283
+ "size": [
284
+ 351.52020263671875,
285
+ 197.97994995117188
286
+ ],
287
+ "flags": {},
288
+ "order": 5,
289
+ "mode": 0,
290
+ "inputs": [
291
+ {
292
+ "name": "clip",
293
+ "type": "CLIP",
294
+ "link": 2
295
+ },
296
+ {
297
+ "name": "vae",
298
+ "shape": 7,
299
+ "type": "VAE",
300
+ "link": 8
301
+ },
302
+ {
303
+ "name": "image1",
304
+ "shape": 7,
305
+ "type": "IMAGE",
306
+ "link": 17
307
+ },
308
+ {
309
+ "name": "image2",
310
+ "shape": 7,
311
+ "type": "IMAGE",
312
+ "link": 18
313
+ },
314
+ {
315
+ "name": "image3",
316
+ "shape": 7,
317
+ "type": "IMAGE",
318
+ "link": null
319
+ }
320
+ ],
321
+ "outputs": [
322
+ {
323
+ "name": "CONDITIONING",
324
+ "type": "CONDITIONING",
325
+ "links": [
326
+ 3
327
+ ]
328
+ }
329
+ ],
330
+ "title": "TextEncodeQwenImageEditPlus Input Prompt",
331
+ "properties": {
332
+ "Node name for S&R": "TextEncodeQwenImageEditPlus"
333
+ },
334
+ "widgets_values": [
335
+ "Put the woman holding a balloon next to the ninja in the hallway."
336
+ ]
337
+ },
338
+ {
339
+ "id": 8,
340
+ "type": "LoadImage",
341
+ "pos": [
342
+ -525.282470703125,
343
+ 21.345966339111328
344
+ ],
345
+ "size": [
346
+ 259.9401550292969,
347
+ 326
348
+ ],
349
+ "flags": {},
350
+ "order": 2,
351
+ "mode": 0,
352
+ "inputs": [],
353
+ "outputs": [
354
+ {
355
+ "name": "IMAGE",
356
+ "type": "IMAGE",
357
+ "links": [
358
+ 18
359
+ ]
360
+ },
361
+ {
362
+ "name": "MASK",
363
+ "type": "MASK",
364
+ "links": null
365
+ }
366
+ ],
367
+ "title": "Optional Input Image",
368
+ "properties": {
369
+ "Node name for S&R": "LoadImage"
370
+ },
371
+ "widgets_values": [
372
+ "ComfyUI_temp_ljpig_00008_.png",
373
+ "image"
374
+ ]
375
+ },
376
+ {
377
+ "id": 7,
378
+ "type": "LoadImage",
379
+ "pos": [
380
+ -555.2627563476562,
381
+ 396.7561340332031
382
+ ],
383
+ "size": [
384
+ 274.080078125,
385
+ 314.0000305175781
386
+ ],
387
+ "flags": {},
388
+ "order": 3,
389
+ "mode": 0,
390
+ "inputs": [],
391
+ "outputs": [
392
+ {
393
+ "name": "IMAGE",
394
+ "type": "IMAGE",
395
+ "links": [
396
+ 17
397
+ ]
398
+ },
399
+ {
400
+ "name": "MASK",
401
+ "type": "MASK",
402
+ "links": null
403
+ }
404
+ ],
405
+ "title": "Optional Input Image",
406
+ "properties": {
407
+ "Node name for S&R": "LoadImage"
408
+ },
409
+ "widgets_values": [
410
+ "ComfyUI_temp_zmuag_00002_.png",
411
+ "image"
412
+ ]
413
+ }
414
+ ],
415
+ "links": [
416
+ [
417
+ 1,
418
+ 1,
419
+ 0,
420
+ 2,
421
+ 0,
422
+ "MODEL"
423
+ ],
424
+ [
425
+ 2,
426
+ 1,
427
+ 1,
428
+ 3,
429
+ 0,
430
+ "CLIP"
431
+ ],
432
+ [
433
+ 3,
434
+ 3,
435
+ 0,
436
+ 2,
437
+ 1,
438
+ "CONDITIONING"
439
+ ],
440
+ [
441
+ 4,
442
+ 4,
443
+ 0,
444
+ 2,
445
+ 2,
446
+ "CONDITIONING"
447
+ ],
448
+ [
449
+ 5,
450
+ 2,
451
+ 0,
452
+ 5,
453
+ 0,
454
+ "LATENT"
455
+ ],
456
+ [
457
+ 6,
458
+ 1,
459
+ 2,
460
+ 5,
461
+ 1,
462
+ "VAE"
463
+ ],
464
+ [
465
+ 7,
466
+ 1,
467
+ 1,
468
+ 4,
469
+ 0,
470
+ "CLIP"
471
+ ],
472
+ [
473
+ 8,
474
+ 1,
475
+ 2,
476
+ 3,
477
+ 1,
478
+ "VAE"
479
+ ],
480
+ [
481
+ 9,
482
+ 1,
483
+ 2,
484
+ 4,
485
+ 1,
486
+ "VAE"
487
+ ],
488
+ [
489
+ 10,
490
+ 5,
491
+ 0,
492
+ 6,
493
+ 0,
494
+ "IMAGE"
495
+ ],
496
+ [
497
+ 13,
498
+ 9,
499
+ 0,
500
+ 2,
501
+ 3,
502
+ "LATENT"
503
+ ],
504
+ [
505
+ 17,
506
+ 7,
507
+ 0,
508
+ 3,
509
+ 2,
510
+ "IMAGE"
511
+ ],
512
+ [
513
+ 18,
514
+ 8,
515
+ 0,
516
+ 3,
517
+ 3,
518
+ "IMAGE"
519
+ ]
520
+ ],
521
+ "groups": [],
522
+ "config": {},
523
+ "extra": {
524
+ "ds": {
525
+ "scale": 1.0435333929857133,
526
+ "offset": [
527
+ 919.9049410313564,
528
+ 75.66198687470876
529
+ ]
530
+ },
531
+ "ue_links": [],
532
+ "frontendVersion": "1.26.13",
533
+ "VHS_latentpreview": false,
534
+ "VHS_latentpreviewrate": 0,
535
+ "VHS_MetadataImage": true,
536
+ "VHS_KeepIntermediate": true
537
+ },
538
+ "version": 0.4
539
+ }
README.md ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model:
4
+ - Qwen/Qwen-Image-Edit-2511
5
+ pipeline_tag: text-to-image
6
+ library_name: comfyUI
7
+ tags:
8
+ - qwen
9
+ - qwen-edit
10
+ - t2i
11
+ - i2i
12
+ ---
13
+
14
+ Merge of accelerators, VAE and CLIP to allow for easy and fast Qwen Image Edit (and text to image) support.
15
+
16
+ Use a "Load Checkpoint" node. 1 CFG, 4 step. Use the "TextEncodeQwenImageEditPlus" node for input images (which are optional) and prompt. Provide no images to just do pure text to image. FP8 precision.
17
+
18
+ **Both NSFW and SFW models are available!** v4 and older combine both NSFW and SFW uses in one model, but performance is subpar. v5+ separates out a NSFW and SFW version, so please pick which model for your use case.
19
+
20
+ **Having problems with scaling, cropping or zooming?** Scaling images in the TextEncoderQwenEditPlus node is the problem. There are many workarounds, but I prefer just fixing the node and I've supplied my version in the Files area. It also supports up to 4 input images. Use the "v2" version and supply the same latent you are sampling with (usually an empty latent), which provides sizing for informed scaling.
21
+
22
+ ![image](https://cdn-uploads.huggingface.co/production/uploads/631be8402ea8535ea48abbc6/ynDNK35eRLlUjha75fYHH.png)
23
+
24
+ **V1:** Uses Qwen-Image-Edit-2509 & 4-step Lightning v2.0. Includes a touch of NSFW LORAs, so it should be a very versatile model for both SFW and NSFW use. sa_solver/beta recommended, but euler_a/beta and er_sde/beta can give decent results too.
25
+
26
+ **V2:** Now uses a mix of Qwen-Image-Edit accelerators, mixing both 8 and 4 steps in one. Also significantly tweaked the NSFW LORAs for better all-around SFW and NSFW use. sa_solver/simple strongly recommended.
27
+
28
+ **V3:** Uses new Qwen-Image-Edit lightning LORAs for much better results. Also significantly adjusted NSFW LORA mix, removing poor ones and increasing quality ones. sa_solver/beta highly recommended.
29
+
30
+ **V4:** Mix of many Qwen Edit and base Qwen accelerators, which I think gives better results. Added a touch of a skin correction LORA. **4-5 steps: use sa_solver/simple, lcm/beta or euler_a/beta** and **6-8 steps: use lcm/beta or euler_a/beta only**.
31
+
32
+ **V5:** NSFW and SFW use cases interfered with each other too much, so I separated them to specialize in their use cases. Updated "snofs" and "qwen4play" NSFW LORAs + Meta4 for v5.2, then added "Qwen Image NSFW Adv." by fok3827 for v5.3. **SFW: lcm/beta or er_sde/beta generally recommended** and **NSFW: lcm/normal recommended**. Prompting "Professional digital photography" helps reduce the plastic look.
33
+
34
+ **V6:** Attempt at valiantcat/Qwen-Image-Edit-MeiTu and partially chestnutlzj/Edit-R1-Qwen-Image-Edit-2509 as a base model. However, this was a broken merge. It appears using them as LORAs may work better and I need to cook some more to find something usable. Stay on v5 until something newer comes out.
35
+
36
+ **V7:** valiantcat/Qwen-Image-Edit-MeiTu and chestnutlzj/Edit-R1-Qwen-Image-Edit-2509 included as LORAs. Accelerator and NSFW LORAs tweaks (v7.1 is more NSFW-heavy). This seemed to be working much better. **lcm/sgm_uniform recommended for 4-6 steps, lcm/normal for 7-8 steps**.
37
+
38
+ **V8:** Using BF16 to load in FP32 LORAs, only to scale down to FP8 for saving. This seems to help resolve "grid" issues and improves quality. Tweaked accelerator amounts. Significant NSFW LORA tweaks (and new SNOFS). **euler_a/beta recommended for 4-6 steps, lcm/normal for 7-8 steps**.
39
+
40
+ **V9:** OK, I lied. "Rebalancing" and "Smartphone Photoreal" LORAs really do help image generations for both SFW and NSFW purposes. If you don't want those LORAs integrated (like making anime or cartoons), use the "Lite" versions. Also, I had a typo in accelerators in V8 that has been fixed for V9. Tweaked NSFW LORAs and significantly reduced how heavy they need to be applied, which should hopefully help consistency. **euler_a/beta recommended for 4-6 steps**. More steps usually work better with sgm_normal or normal schedulers.
41
+
42
+ **V10:** This is kinda a mix of v5 and v9. MeiTu and Edit-R1 dropped. I'm keeping the "Rebalancing" and "Smartphone" LORAs at half strengths which I think help skin, variety and composition. NSFW LORAs closely resemble v5.3 (but with updated v1.2 snofs). v10.4 NSFW tweaked to improve character consistency and penises. **euler/beta strongly recommended for 4-8 steps** but **euler_a/sgm_uniform recommended for NSFW v10.2+**.
43
+
44
+ **V11:** Tweaked NSFW LORAs, using fewer to rely on more compatible ones instead. Spread out "realism" LORAs to more at lower strength. **euler/beta recommended** for both NSFW and SFW, but feel free to experiment with others!
45
+
46
+ **V12:** Getting impatient waiting for Qwen Edit 2511 (2512?), so went looking for more LORAs and tweaks to reduce the "plastic" look. **euler/sgm_uniform for SFW** and **er_sde/sgm_uniform for NSFW**, although experimentation is healthy in this context.
47
+
48
+ **V13:** Tweaks to included LORAs in an attempt to reduce gridlines and increase character consistency (without returning the plastic look). **er_sde/beta recommended**.
49
+
50
+ **V14:** Trimmed LORAs which may have been interfering with character consistency (while hopefully still reducing the "plastic" look). Updated new SNOFS v1.3 for NSFW and trimmed poor NSFW LORAs too. **er_sde/beta recommended**.
51
+
52
+ **V14.1:** Added "InSubject" LORA to both SFW and NSFW merges to improve character consistency. Otherwise, generally the same as v14. **er_sde/beta recommended**.
53
+
54
+ **V15:** Now using Qwen-Edit-2511! Slight tweaks to LORAs. Skipped making NSFW merge because this base merge wasn't that great. **er_sde/beta recommended**, but please experiment.
55
+
56
+ **V16:** Noticed some degradation from previous LORAs (mostly "realism" LORAs) that are not needed/compatible with 2511, so slimmed them down. Further tweaks to NSFW LORAs (added "Sex-tacular"). **er_sde/beta recommended**.
57
+
58
+ **V17:** Merged 2509 and 2511 together with the goal of correcting contrast issues and LORA compatibility with 2511 while maintaining character consistency. **euler_ancestral/beta highly recommended**.
59
+
60
+ **V18:** More 2511, a bit less 2509. Different merging method that I think corrects an issue I was having with many input images. **euler_ancestral/beta recommended**
61
+
62
+ **V19:** New Lightning Edit 2511 8-step mixed in (still recommend 4-8 steps). Also a new NSFW LORA (GNASS for Qwen 2512) that worked quite well in the NSFW merge. **er_sde/beta or euler_ancestral/beta recommended**.
63
+
64
+ **V20:** Returning to 100% Qwen Edit 2511. Tweaks to accelerators to hopefully give better results without 2509 mixed in. Added a minor amount of "BestFaceSwap". Tweaks to realism LORAs and significant tweaks to NSFW LORAs. **euler_ancestral/beta recommended**
65
+
66
+ **V21:** Removed older "realism" LORAs and added new 2511 "anything2real" and "anime2real" LORAs in hopes of improving skin texture and overall edit functionality. Tweaks to NSFW LORA weights. If you are solely interested in flat 2D anime, v20 or v19 may be suited better (this trends more realistic) **euler_a/beta recommended**
67
+
68
+ **V22:** Removed "anything2real" and "anime2real" which were stomping styles and causing inconsistencies. Added small amounts of JibMix Skin and qwen-skin-edit v1.1 to combat 'plastic' instead. Significant tweaks to NSFW LORA weights again. **euler_ancestral/beta recommended**
69
+
70
+ **V23:** Tweaks to skin and realism LORAs (removed JibMix which seemed to cause oddities). Reduced a "plastic" NSFW LORA that was making realistic results more difficult in that merge. This one was feeling pretty good. **euler_ancestral/beta recommended**
fixed-textencode-node/.can_replace_comfyui_node_with_this.v2_is_best ADDED
File without changes
fixed-textencode-node/README.md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Replace your nodes_qwen.py file with one of these, which will improve your "native" TextEncodeQwenEditPlus node. You should be able to find the nodes_qwen.py file in your ComfyUI/comfy_extras/ folder.
2
+
3
+ * It increases the number of image inputs from 3 to 4.
4
+ * It slightly tweaks the system input to be a bit more versatile.
5
+ * It fixes an issue where the image would get cut off and mirror itself sometimes by cropping better.
6
+ * Addresses unexpected zoom / cropping problems
7
+
8
+ The "original" allows you to input a "target_size" which will scale the input images to better match your output size (which can fix zooming issues). Usually setting it to 90% of your largest image size works.
9
+
10
+ v2 takes a latent as an input, which should contain your target image size. The node will automatically scale input images to best match that target latent size. If you do not provide a latent input, it will not do any scaling.
11
+
12
+ **I highly recommend the v2 version**
fixed-textencode-node/nodes_qwen.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import node_helpers
2
+ import comfy.utils
3
+ import math
4
+ from typing_extensions import override
5
+ from comfy_api.latest import ComfyExtension, io
6
+
7
+ class TextEncodeQwenImageEdit(io.ComfyNode):
8
+ @classmethod
9
+ def define_schema(cls):
10
+ return io.Schema(
11
+ node_id="TextEncodeQwenImageEdit",
12
+ category="advanced/conditioning",
13
+ inputs=[
14
+ io.Clip.Input("clip"),
15
+ io.String.Input("prompt", multiline=True, dynamic_prompts=True),
16
+ io.Vae.Input("vae", optional=True),
17
+ io.Image.Input("image", optional=True),
18
+ ],
19
+ outputs=[
20
+ io.Conditioning.Output(),
21
+ ],
22
+ )
23
+
24
+ @classmethod
25
+ def execute(cls, clip, prompt, vae=None, image=None) -> io.NodeOutput:
26
+ ref_latent = None
27
+ if image is None:
28
+ images = []
29
+ else:
30
+ samples = image.movedim(-1, 1)
31
+ total = int(1024 * 1024)
32
+
33
+ scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2]))
34
+ width = round(samples.shape[3] * scale_by)
35
+ height = round(samples.shape[2] * scale_by)
36
+
37
+ s = comfy.utils.common_upscale(samples, width, height, "area", "disabled")
38
+ image = s.movedim(1, -1)
39
+ images = [image[:, :, :, :3]]
40
+ if vae is not None:
41
+ ref_latent = vae.encode(image[:, :, :, :3])
42
+
43
+ tokens = clip.tokenize(prompt, images=images)
44
+ conditioning = clip.encode_from_tokens_scheduled(tokens)
45
+ if ref_latent is not None:
46
+ conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": [ref_latent]}, append=True)
47
+ return io.NodeOutput(conditioning)
48
+
49
+
50
+ class TextEncodeQwenImageEditPlus(io.ComfyNode):
51
+ @classmethod
52
+ def define_schema(cls):
53
+ return io.Schema(
54
+ node_id="TextEncodeQwenImageEditPlus",
55
+ category="advanced/conditioning",
56
+ inputs=[
57
+ io.Clip.Input("clip"),
58
+ io.String.Input("prompt", multiline=True, dynamic_prompts=True),
59
+ io.Vae.Input("vae", optional=True),
60
+ io.Image.Input("image1", optional=True),
61
+ io.Image.Input("image2", optional=True),
62
+ io.Image.Input("image3", optional=True),
63
+ io.Image.Input("image4", optional=True),
64
+ io.Int.Input("target_size", optional=True, default=896, min=128, max=2048, step=32),
65
+ ],
66
+ outputs=[
67
+ io.Conditioning.Output(),
68
+ ],
69
+ )
70
+
71
+ @classmethod
72
+ def execute(cls, clip, prompt, vae=None, image1=None, image2=None, image3=None, image4=None, target_size=896) -> io.NodeOutput:
73
+ ref_latents = []
74
+ images = [image1, image2, image3, image4]
75
+ images_vl = []
76
+ llama_template = "<|im_start|>system\nDescribe key details of the input image (including any objects, characters, poses, facial features, clothing, setting, textures and style), then explain how the user's text instruction should alter, modify or recreate the image. Generate a new image that meets the user's requirements, which can vary from a small change to a completely new image using inputs as a guide.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n"
77
+ image_prompt = ""
78
+
79
+ for i, image in enumerate(images):
80
+ if image is not None:
81
+ samples = image.movedim(-1, 1)
82
+ total = int(384 * 384)
83
+
84
+ scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2]))
85
+ width = round(samples.shape[3] * scale_by)
86
+ height = round(samples.shape[2] * scale_by)
87
+
88
+ s = comfy.utils.common_upscale(samples, width, height, "area", "disabled")
89
+ images_vl.append(s.movedim(1, -1))
90
+ if vae is not None:
91
+ total = int(target_size * target_size)
92
+ scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2]))
93
+
94
+ height = int(samples.shape[2] * scale_by / 32) * 32
95
+ width = int(samples.shape[3] * scale_by / 32) * 32
96
+
97
+ s = comfy.utils.common_upscale(samples, width, height, "lanczos", "center")
98
+ ref_latents.append(vae.encode(s.movedim(1, -1)[:, :, :, :3]))
99
+
100
+ image_prompt += "Picture {}: <|vision_start|><|image_pad|><|vision_end|>".format(i + 1)
101
+
102
+ tokens = clip.tokenize(image_prompt + prompt, images=images_vl, llama_template=llama_template)
103
+ conditioning = clip.encode_from_tokens_scheduled(tokens)
104
+ if len(ref_latents) > 0:
105
+ conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": ref_latents}, append=True)
106
+ return io.NodeOutput(conditioning)
107
+
108
+ class QwenExtension(ComfyExtension):
109
+ @override
110
+ async def get_node_list(self) -> list[type[io.ComfyNode]]:
111
+ return [
112
+ TextEncodeQwenImageEdit,
113
+ TextEncodeQwenImageEditPlus,
114
+ ]
115
+
116
+
117
+ async def comfy_entrypoint() -> QwenExtension:
118
+ return QwenExtension()
fixed-textencode-node/nodes_qwen.v2.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import node_helpers
2
+ import comfy.utils
3
+ import math
4
+ from typing_extensions import override
5
+ from comfy_api.latest import ComfyExtension, io
6
+
7
+ class TextEncodeQwenImageEdit(io.ComfyNode):
8
+ @classmethod
9
+ def define_schema(cls):
10
+ return io.Schema(
11
+ node_id="TextEncodeQwenImageEdit",
12
+ category="advanced/conditioning",
13
+ inputs=[
14
+ io.Clip.Input("clip"),
15
+ io.String.Input("prompt", multiline=True, dynamic_prompts=True),
16
+ io.Vae.Input("vae", optional=True),
17
+ io.Image.Input("image", optional=True),
18
+ ],
19
+ outputs=[
20
+ io.Conditioning.Output(),
21
+ ],
22
+ )
23
+
24
+ @classmethod
25
+ def execute(cls, clip, prompt, vae=None, image=None) -> io.NodeOutput:
26
+ ref_latent = None
27
+ if image is None:
28
+ images = []
29
+ else:
30
+ samples = image.movedim(-1, 1)
31
+ total = int(1024 * 1024)
32
+
33
+ scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2]))
34
+ width = round(samples.shape[3] * scale_by)
35
+ height = round(samples.shape[2] * scale_by)
36
+
37
+ s = comfy.utils.common_upscale(samples, width, height, "area", "disabled")
38
+ image = s.movedim(1, -1)
39
+ images = [image[:, :, :, :3]]
40
+ if vae is not None:
41
+ ref_latent = vae.encode(image[:, :, :, :3])
42
+
43
+ tokens = clip.tokenize(prompt, images=images)
44
+ conditioning = clip.encode_from_tokens_scheduled(tokens)
45
+ if ref_latent is not None:
46
+ conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": [ref_latent]}, append=True)
47
+ return io.NodeOutput(conditioning)
48
+
49
+
50
+ class TextEncodeQwenImageEditPlus(io.ComfyNode):
51
+ @classmethod
52
+ def define_schema(cls):
53
+ return io.Schema(
54
+ node_id="TextEncodeQwenImageEditPlus",
55
+ category="advanced/conditioning",
56
+ inputs=[
57
+ io.Clip.Input("clip"),
58
+ io.String.Input("prompt", multiline=True, dynamic_prompts=True),
59
+ io.Vae.Input("vae", optional=True),
60
+ io.Image.Input("image1", optional=True),
61
+ io.Image.Input("image2", optional=True),
62
+ io.Image.Input("image3", optional=True),
63
+ io.Image.Input("image4", optional=True),
64
+ io.Latent.Input("target_latent", optional=True),
65
+ ],
66
+ outputs=[
67
+ io.Conditioning.Output(),
68
+ ],
69
+ )
70
+
71
+ @classmethod
72
+ def execute(cls, clip, prompt, vae=None, image1=None, image2=None, image3=None, image4=None, target_latent=None) -> io.NodeOutput:
73
+ ref_latents = []
74
+ images = [image1, image2, image3, image4]
75
+ images_vl = []
76
+ llama_template = "<|im_start|>system\nDescribe key details of the input image (including any objects, characters, poses, facial features, clothing, setting, textures and style), then explain how the user's text instruction should alter, modify or recreate the image. Generate a new image that meets the user's requirements, which can vary from a small change to a completely new image using inputs as a guide.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n"
77
+ image_prompt = ""
78
+
79
+ for i, image in enumerate(images):
80
+ if image is not None:
81
+ samples = image.movedim(-1, 1)
82
+ total = int(384 * 384)
83
+
84
+ scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2]))
85
+ width = round(samples.shape[3] * scale_by)
86
+ height = round(samples.shape[2] * scale_by)
87
+
88
+ s = comfy.utils.common_upscale(samples, width, height, "lanczos", "center")
89
+ images_vl.append(s.movedim(1, -1))
90
+ if vae is not None:
91
+ if target_latent is not None:
92
+ twidth = target_latent["samples"].shape[-1] * 8
93
+ theight = target_latent["samples"].shape[-2] * 8
94
+ s = comfy.utils.common_upscale(samples, twidth, theight, "lanczos", "center")
95
+ else:
96
+ s = samples
97
+ ref_latents.append(vae.encode(s.movedim(1, -1)[:, :, :, :3]))
98
+
99
+ image_prompt += "Picture {}: <|vision_start|><|image_pad|><|vision_end|>".format(i + 1)
100
+
101
+ tokens = clip.tokenize(image_prompt + prompt, images=images_vl, llama_template=llama_template)
102
+ conditioning = clip.encode_from_tokens_scheduled(tokens)
103
+ if len(ref_latents) > 0:
104
+ conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": ref_latents}, append=True)
105
+ return io.NodeOutput(conditioning)
106
+
107
+ class QwenExtension(ComfyExtension):
108
+ @override
109
+ async def get_node_list(self) -> list[type[io.ComfyNode]]:
110
+ return [
111
+ TextEncodeQwenImageEdit,
112
+ TextEncodeQwenImageEditPlus,
113
+ ]
114
+
115
+
116
+ async def comfy_entrypoint() -> QwenExtension:
117
+ return QwenExtension()
v10/Qwen-Rapid-AIO-NSFW-v10.2.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b8f6a2d91475ca487cae7e9347cedea4b96bfb06e6c7914dc1680607047a0c33
3
+ size 28431829023
v10/Qwen-Rapid-AIO-NSFW-v10.4.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b6f4471a09917ddda5a4ac503341745f917e2fb8d8091c85d3cfd8b53c3405ee
3
+ size 28431828927
v10/Qwen-Rapid-AIO-NSFW-v10.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df28b664316e9eea26e72e7dce09108e1d0b3c7fe442c39c05085474816a313b
3
+ size 28431827935
v10/Qwen-Rapid-AIO-SFW-v10.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3bfa90111a1ad116eb1f9684055ec2ced9b727a183c3ffd3a56b949a23d4db52
3
+ size 28431827775
v11/Qwen-Rapid-AIO-NSFW-v11.1.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:331c0841eb35bc89f5ad450efee3bab26eba75ef6a204552b2a3aa09e8eeb67b
3
+ size 28431829495
v11/Qwen-Rapid-AIO-NSFW-v11.3.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec848e22f66b3ee0daabb79829c067454ec4728853664e9d5b46e503c4caea8b
3
+ size 36178999106
v11/Qwen-Rapid-AIO-NSFW-v11.4.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0e3b3a565536b41f4aa1902e24c26723e10adfabca1cbb813be680b93ab6304
3
+ size 28431828903
v11/Qwen-Rapid-AIO-NSFW-v11.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:04321f9667ad5e6ef5a99a7dde6fb5a4caabde5772a3c2bf04f9cc8782d2c0e4
3
+ size 28431829231
v11/Qwen-Rapid-AIO-SFW-v11.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:782232a127d48b4d2831d0e89261e7caaf459254ff1c61017988d3eab546d3cd
3
+ size 28431829223
v12/Qwen-Rapid-AIO-NSFW-v12.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1528d7c9d2d2cb589b1bf59c2006a20fd1d11136fe759e9e50e4ca3ff8bfeb01
3
+ size 28431828279
v12/Qwen-Rapid-AIO-SFW-v12.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b690e5c1efb630e7141c20e7bf09a975aa2cd077a033b2261418bb6aecc12430
3
+ size 28431829599
v13/Qwen-Rapid-AIO-NSFW-v13.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11a03cf20a25d7bcc77ade0f3111970c32250dcc8424d87ce10af75172f751f3
3
+ size 28431827975
v13/Qwen-Rapid-AIO-SFW-v13.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:53691c3f66559b240c0e67e0457312e6563cfaf225c6292e1d5351ce7e1e141f
3
+ size 28431827839
v14/Qwen-Rapid-AIO-NSFW-v14.1.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:12793b4ffc2a57ebc034e8df46b5690e6992c5b26a7f48ce0504f91f995eab18
3
+ size 28431827591
v14/Qwen-Rapid-AIO-NSFW-v14.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a4d9f208f3c031eba41ff14894b6395881eba3e58de1b342ecedfda06d9b432e
3
+ size 28431827903
v14/Qwen-Rapid-AIO-SFW-v14.1.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:776d5c83591c1530b52aafa8b800933adc1b666b934caea7675f774eaefb3f33
3
+ size 28431827727
v14/Qwen-Rapid-AIO-SFW-v14.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:10e80d81b4acbb4aa0c2aa29f67da0e950a7b20622bd0dd937bfe8bd9bb72fb5
3
+ size 28431827903
v15/.no_nsfw_version_for_this ADDED
File without changes
v15/Qwen-Rapid-AIO-SFW-v15.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7e86fc8cb7812a462578871b9df6d20a3ea3576622752c01a8bb7ce41224758c
3
+ size 28431827823
v16/Qwen-Rapid-AIO-NSFW-v16.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8260ea18d81bc1805e70a63ca09f043f8666610e0db28b3be3f863d8fdfcb29e
3
+ size 28431827903
v16/Qwen-Rapid-AIO-SFW-v16.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec79612014a4f19b94020693c7224944ce50e424b75005268f0af5fab6360a44
3
+ size 28431827879
v17/Qwen-Rapid-AIO-NSFW-v17.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c4fb2ecc3789823d9ecfd8782312211b3c23f2e93531680ebbd279bbf3568213
3
+ size 28431830519
v17/Qwen-Rapid-AIO-SFW-v17.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:426ec039bb5f424c068649b84024bad22858291f74993c9324cccd41bfb989a0
3
+ size 28431830935
v18/Qwen-Rapid-AIO-NSFW-v18.1.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e38652723bfc23cc54e3468115d9eebedc9753d08b7523aa1511166cd70c1156
3
+ size 28431843567
v18/Qwen-Rapid-AIO-NSFW-v18.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e81c86567a7812cecbfa0be69a652934f431f428e0cdd1ef2f38c6e029b7c5a
3
+ size 28431844103
v18/Qwen-Rapid-AIO-SFW-v18.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5728e1640b636cea518b57a67337c8a68057c0135c1d099575f2460cebf3cde7
3
+ size 28431844111
v19/Qwen-Rapid-AIO-NSFW-v19.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba71575515709c9912560d1176b2386eaa49294fedc6ce57b9734aa57e91e5ac
3
+ size 28431843583
v19/Qwen-Rapid-AIO-SFW-v19.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7113d4b1c0210539d3bc1582a1d48eb0450a851abe7444666ccd97e04e6a4f12
3
+ size 28431843591
v20/Qwen-Rapid-AIO-NSFW-v20.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0bf66bfd005ce87b4907a26086a8d488fe0f46c5e06ac0011d1f2c3e2258932
3
+ size 28431840903
v20/Qwen-Rapid-AIO-SFW-v20.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ff57a0c91a4d2a493a7caef0f16fcfda59b1310dd7972a7e878551059fe2b41
3
+ size 28431840911
v21/Qwen-Rapid-AIO-NSFW-v21.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:84c5725e8a8bce61c7f9217621bc702ec24675439f25108fe20089f3a535eb78
3
+ size 28431841071
v21/Qwen-Rapid-AIO-SFW-v21.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:18690c4ec33fde2960fea95596ec63560731bc919a300ab2e5b1ab98c3a9e95b
3
+ size 28431841071
v22/Qwen-Rapid-AIO-NSFW-v22.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4e8d7689ceaca6e60305c07c3cfb697a70b312da2d3827598c7e932930da3362
3
+ size 28431839775
v22/Qwen-Rapid-AIO-SFW-v22.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8d419320329eef7dce757cef6eca89e670766bfbbe8c346dd78b3def1e109b0d
3
+ size 28431839775
v23/Qwen-Rapid-AIO-NSFW-v23.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdb919fc81bea63f13759967fc92c9118142e5c70d4e6795199233a35eefa233
3
+ size 28431840023
v23/Qwen-Rapid-AIO-SFW-v23.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:060c54a0deda2b802fcc850da48c5406a2a31313b5b5881eea3ceb68f7e18ba0
3
+ size 28431840023
v5/Qwen-Rapid-AIO-NSFW-v5.1.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:33c8fbd5e972c475744ede2bc3f1184ffa999b68fa055eae043e0f2f5c7f1125
3
+ size 28978662810
v5/Qwen-Rapid-AIO-NSFW-v5.2.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:78352adb263fea99b3509d37d7d6065d6db3a92e99040d1e7c77d688ea037d29
3
+ size 28978664186