{
"last_node_id": 68,
"last_link_id": 90,
"nodes": [
{
"id": 8,
"type": "VAEDecode",
"pos": {
"0": 1083,
"1": 19
},
"size": {
"0": 210,
"1": 46
},
"flags": {},
"order": 14,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 24
},
{
"name": "vae",
"type": "VAE",
"link": 12
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
60
],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "VAEDecode"
}
},
{
"id": 22,
"type": "BasicGuider",
"pos": {
"0": 783,
"1": 212
},
"size": {
"0": 241.79998779296875,
"1": 46
},
"flags": {},
"order": 12,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 85,
"slot_index": 0
},
{
"name": "conditioning",
"type": "CONDITIONING",
"link": 74,
"slot_index": 1
}
],
"outputs": [
{
"name": "GUIDER",
"type": "GUIDER",
"links": [
30
],
"slot_index": 0,
"shape": 3
}
],
"properties": {
"Node name for S&R": "BasicGuider"
}
},
{
"id": 16,
"type": "KSamplerSelect",
"pos": {
"0": 1110,
"1": 648
},
"size": {
"0": 315,
"1": 58
},
"flags": {},
"order": 0,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "SAMPLER",
"type": "SAMPLER",
"links": [
19
],
"shape": 3
}
],
"properties": {
"Node name for S&R": "KSamplerSelect"
},
"widgets_values": [
"euler"
]
},
{
"id": 25,
"type": "RandomNoise",
"pos": {
"0": 1128,
"1": 353
},
"size": {
"0": 315,
"1": 82
},
"flags": {},
"order": 1,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "NOISE",
"type": "NOISE",
"links": [
37
],
"shape": 3
}
],
"properties": {
"Node name for S&R": "RandomNoise"
},
"widgets_values": [
771605217453917,
"randomize"
]
},
{
"id": 11,
"type": "DualCLIPLoader",
"pos": {
"0": -473,
"1": 356
},
"size": {
"0": 315,
"1": 106
},
"flags": {},
"order": 2,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "CLIP",
"type": "CLIP",
"links": [
83
],
"slot_index": 0,
"shape": 3
}
],
"properties": {
"Node name for S&R": "DualCLIPLoader"
},
"widgets_values": [
"t5xxl_fp8_e4m3fn.safetensors",
"clip_l.safetensors",
"flux"
]
},
{
"id": 10,
"type": "VAELoader",
"pos": {
"0": 611,
"1": -42
},
"size": {
"0": 315,
"1": 58
},
"flags": {},
"order": 3,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "VAE",
"type": "VAE",
"links": [
12,
64
],
"slot_index": 0,
"shape": 3
}
],
"properties": {
"Node name for S&R": "VAELoader"
},
"widgets_values": [
"ae.safetensors"
]
},
{
"id": 12,
"type": "UNETLoader",
"pos": {
"0": -493,
"1": 204
},
"size": {
"0": 315,
"1": 82
},
"flags": {},
"order": 4,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
38,
61,
82
],
"slot_index": 0,
"shape": 3
}
],
"properties": {
"Node name for S&R": "UNETLoader"
},
"widgets_values": [
"flux1-dev-fp8.safetensors",
"fp8_e4m3fn"
]
},
{
"id": 41,
"type": "UpscaleModelLoader",
"pos": {
"0": 1188,
"1": -183
},
"size": {
"0": 315,
"1": 58
},
"flags": {},
"order": 5,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "UPSCALE_MODEL",
"type": "UPSCALE_MODEL",
"links": [
65
],
"shape": 3
}
],
"properties": {
"Node name for S&R": "UpscaleModelLoader"
},
"widgets_values": [
"4x_foolhardy_Remacri.pth"
]
},
{
"id": 39,
"type": "UltimateSDUpscale",
"pos": {
"0": 1462,
"1": -59
},
"size": {
"0": 315,
"1": 614
},
"flags": {},
"order": 15,
"mode": 4,
"inputs": [
{
"name": "image",
"type": "IMAGE",
"link": 60
},
{
"name": "model",
"type": "MODEL",
"link": 61
},
{
"name": "positive",
"type": "CONDITIONING",
"link": null,
"slot_index": 2
},
{
"name": "negative",
"type": "CONDITIONING",
"link": null,
"slot_index": 3
},
{
"name": "vae",
"type": "VAE",
"link": 64,
"slot_index": 4
},
{
"name": "upscale_model",
"type": "UPSCALE_MODEL",
"link": 65,
"slot_index": 5
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
66
],
"slot_index": 0,
"shape": 3
}
],
"properties": {
"Node name for S&R": "UltimateSDUpscale"
},
"widgets_values": [
2,
288337876781029,
"randomize",
20,
8,
"euler",
"simple",
0.2,
"Linear",
512,
512,
8,
32,
"None",
0,
64,
8,
16,
true,
false
]
},
{
"id": 66,
"type": "Note",
"pos": {
"0": -145,
"1": 971
},
"size": {
"0": 445.3786315917969,
"1": 251.9473419189453
},
"flags": {},
"order": 6,
"mode": 0,
"inputs": [],
"outputs": [],
"properties": {
"text": ""
},
"widgets_values": [
"young man, actor, intense expression, muddy clothes, t-shirt, jeans shorts, gym setting, dirt on body, hands clasped in front of body, brooding pose \n\nThe image shows a young man sitting on a wooden bench in a dark room. He is wearing a dark green t-shirt and grey pants. His hands are covered in mud and he appears to be deep in thought. His hair is messy and unkempt, and he has a serious expression on his face. The background is blurred, but it seems like he is in a workshop or garage. The overall mood of the image is somber and contemplative.\n"
],
"color": "#432",
"bgcolor": "#653"
},
{
"id": 57,
"type": "LoraLoader",
"pos": {
"0": -132,
"1": 706
},
"size": {
"0": 394.9391174316406,
"1": 130.8064422607422
},
"flags": {},
"order": 9,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 82
},
{
"name": "clip",
"type": "CLIP",
"link": 83
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
85
],
"slot_index": 0,
"shape": 3
},
{
"name": "CLIP",
"type": "CLIP",
"links": [
87
],
"slot_index": 1,
"shape": 3
}
],
"properties": {
"Node name for S&R": "LoraLoader"
},
"widgets_values": [
"aodai_v1.safetensors",
1,
1
]
},
{
"id": 13,
"type": "SamplerCustomAdvanced",
"pos": {
"0": 1099,
"1": 171
},
"size": {
"0": 355.20001220703125,
"1": 106
},
"flags": {},
"order": 13,
"mode": 0,
"inputs": [
{
"name": "noise",
"type": "NOISE",
"link": 37,
"slot_index": 0
},
{
"name": "guider",
"type": "GUIDER",
"link": 30,
"slot_index": 1
},
{
"name": "sampler",
"type": "SAMPLER",
"link": 19,
"slot_index": 2
},
{
"name": "sigmas",
"type": "SIGMAS",
"link": 20,
"slot_index": 3
},
{
"name": "latent_image",
"type": "LATENT",
"link": 89,
"slot_index": 4
}
],
"outputs": [
{
"name": "output",
"type": "LATENT",
"links": [
24
],
"slot_index": 0,
"shape": 3
},
{
"name": "denoised_output",
"type": "LATENT",
"links": null,
"shape": 3
}
],
"properties": {
"Node name for S&R": "SamplerCustomAdvanced"
}
},
{
"id": 68,
"type": "Reroute",
"pos": {
"0": 1541.0958251953125,
"1": 690.804443359375
},
"size": [
75,
26
],
"flags": {},
"order": 10,
"mode": 0,
"inputs": [
{
"name": "",
"type": "*",
"link": 90
}
],
"outputs": [
{
"name": "",
"type": "LATENT",
"links": [
89
]
}
],
"properties": {
"showOutputText": false,
"horizontal": false
}
},
{
"id": 67,
"type": "Empty Latent by Ratio (WLSH)",
"pos": {
"0": 1112,
"1": 783
},
"size": {
"0": 352.79998779296875,
"1": 170
},
"flags": {},
"order": 7,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "latent",
"type": "LATENT",
"links": [
90
],
"shape": 3
},
{
"name": "width",
"type": "INT",
"links": null,
"shape": 3
},
{
"name": "height",
"type": "INT",
"links": null,
"shape": 3
}
],
"properties": {
"Node name for S&R": "Empty Latent by Ratio (WLSH)"
},
"widgets_values": [
"16:9",
"portrait",
576,
2
]
},
{
"id": 43,
"type": "CLIPTextEncodeFlux",
"pos": {
"0": 492,
"1": 638
},
"size": {
"0": 432.8548278808594,
"1": 348.077392578125
},
"flags": {},
"order": 11,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 87
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
74
],
"slot_index": 0,
"shape": 3
}
],
"properties": {
"Node name for S&R": "CLIPTextEncodeFlux"
},
"widgets_values": [
"a photo of a young Asian women dressed in traditional Vietnamese dress called a0da1. She has long, straight black hair and fair skin. The person is smiling and walking alongside a bicycle with a basket. The setting appears to be outdoors, with trees and greenery in the background, suggesting a park or garden area. The overall atmosphere is bright and serene.",
"a photo of a young Asian women dressed in traditional Vietnamese dress called a0da1. She has long, straight black hair and fair skin. The person is smiling and walking alongside a bicycle with a basket. The setting appears to be outdoors, with trees and greenery in the background, suggesting a park or garden area. The overall atmosphere is bright and serene.",
3.5
],
"color": "#232",
"bgcolor": "#353"
},
{
"id": 17,
"type": "BasicScheduler",
"pos": {
"0": 1122,
"1": 485
},
"size": {
"0": 315,
"1": 106
},
"flags": {},
"order": 8,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 38,
"slot_index": 0
}
],
"outputs": [
{
"name": "SIGMAS",
"type": "SIGMAS",
"links": [
20
],
"shape": 3
}
],
"properties": {
"Node name for S&R": "BasicScheduler"
},
"widgets_values": [
"simple",
28,
1
]
},
{
"id": 29,
"type": "Image Save",
"pos": {
"0": 1791,
"1": 9
},
"size": [
581.7843206358411,
931.4156938429653
],
"flags": {},
"order": 16,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 66
}
],
"outputs": [
{
"name": "images",
"type": "IMAGE",
"links": null,
"shape": 3
},
{
"name": "files",
"type": "STRING",
"links": null,
"shape": 3
}
],
"properties": {
"Node name for S&R": "Image Save"
},
"widgets_values": [
"flux[time(%Y-%m-%d)]",
"ComfyUI",
"_",
4,
"false",
"png",
300,
100,
"true",
"false",
"false",
"false",
"true",
"true",
"true"
]
}
],
"links": [
[
12,
10,
0,
8,
1,
"VAE"
],
[
19,
16,
0,
13,
2,
"SAMPLER"
],
[
20,
17,
0,
13,
3,
"SIGMAS"
],
[
24,
13,
0,
8,
0,
"LATENT"
],
[
30,
22,
0,
13,
1,
"GUIDER"
],
[
37,
25,
0,
13,
0,
"NOISE"
],
[
38,
12,
0,
17,
0,
"MODEL"
],
[
60,
8,
0,
39,
0,
"IMAGE"
],
[
61,
12,
0,
39,
1,
"MODEL"
],
[
64,
10,
0,
39,
4,
"VAE"
],
[
65,
41,
0,
39,
5,
"UPSCALE_MODEL"
],
[
66,
39,
0,
29,
0,
"IMAGE"
],
[
74,
43,
0,
22,
1,
"CONDITIONING"
],
[
82,
12,
0,
57,
0,
"MODEL"
],
[
83,
11,
0,
57,
1,
"CLIP"
],
[
85,
57,
0,
22,
0,
"MODEL"
],
[
87,
57,
1,
43,
0,
"CLIP"
],
[
89,
68,
0,
13,
4,
"LATENT"
],
[
90,
67,
0,
68,
0,
"*"
]
],
"groups": [],
"config": {},
"extra": {
"ds": {
"scale": 0.2888888642282197,
"offset": [
1274.0831703760969,
402.4944507240739
]
}
},
"version": 0.4
}