cyd0806 committed on
Commit
96b5a4c
·
verified ·
1 Parent(s): 0422a60

Upload inference.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. inference.py +124 -0
inference.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os,sys
import ipdb  # NOTE(review): debug-only import; appears unused below — consider removing.
# Make the project root (parent of this file's directory) importable.
current_dir = os.path.dirname(__file__)
sys.path.append(os.path.abspath(os.path.join(current_dir, '..')))
import torch
from src.condition import Condition
from PIL import Image
from src.SubjectGeniusTransformer2DModel import SubjectGeniusTransformer2DModel
from src.SubjectGeniusPipeline import SubjectGeniusPipeline
from accelerate.utils import set_seed
import json
import argparse
import cv2
import numpy as np
from datetime import datetime
# Inference runs in bfloat16 on the first CUDA device.
weight_dtype = torch.bfloat16
device = torch.device("cuda:0")
18
+
19
+
20
+
21
+
22
def parse_args(input_args=None):
    """Parse command-line options for the inference script.

    Args:
        input_args: Optional list of argument strings; when None,
            ``sys.argv[1:]`` is used (standard argparse behavior).

    Returns:
        argparse.Namespace with the parsed options plus derived fields:
        ``revision``/``variant`` (always None), ``json`` (the loaded JSON
        dict replacing the path string), ``prompt`` (falls back to the
        JSON's 'description' when not given), and ``denoising_lora_name``
        (basename of the denoising-LoRA directory, used as adapter name).
    """
    parser = argparse.ArgumentParser(description="inference script.")
    parser.add_argument("--pretrained_model_name_or_path", type=str,default="/data/ydchen/VLP/SubjectGenius/model/FLUX.1-schnell",)
    parser.add_argument("--transformer",type=str,default="/data/ydchen/VLP/SubjectGenius/model/FLUX.1-schnell/transformer",)
    parser.add_argument("--condition_types", type=str, nargs='+', default=["fill","subject"],)
    parser.add_argument("--denoising_lora",type=str,default="/data/ydchen/VLP/SubjectGenius/model/Subject_genuis/Denoising_LoRA/subject_fill_union",)
    parser.add_argument("--denoising_lora_weight",type=float,default=1.0,)
    parser.add_argument("--condition_lora_dir",type=str,default="/data/ydchen/VLP/SubjectGenius/model/Subject_genuis/Condition_LoRA",)
    parser.add_argument("--work_dir",type=str,default="/data/ydchen/VLP/SubjectGenius/output/inference_result",)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--resolution",type=int,default=512,)
    parser.add_argument("--canny",type=str,default=None)
    parser.add_argument("--depth",type=str,default=None)
    parser.add_argument("--fill",type=str,default="/data/ydchen/VLP/SubjectGenius/examples/window/background.jpg")
    parser.add_argument("--subject",type=str,default="/data/ydchen/VLP/SubjectGenius/examples/window/subject.jpg")
    parser.add_argument("--json",type=str,default="/data/ydchen/VLP/SubjectGenius/examples/window/1634_rank0_A decorative fabric topper for windows..json")
    parser.add_argument("--prompt",type=str,default=None)
    parser.add_argument("--num",type=int,default=1)
    parser.add_argument("--version",type=str,default="training-free",choices=["training-based","training-free"])

    # BUG FIX: the original called parser.parse_args() and silently ignored
    # `input_args`, so programmatic callers could never inject arguments.
    args = parser.parse_args(input_args)
    args.revision = None
    args.variant = None
    # BUG FIX: the original did json.load(open(args.json)) and leaked the
    # file handle; the `with` block guarantees it is closed.
    with open(args.json, "r", encoding="utf-8") as f:
        args.json = json.load(f)
    if args.prompt is None:
        args.prompt = args.json['description']
    args.denoising_lora_name = os.path.basename(os.path.normpath(args.denoising_lora))
    return args
50
+
51
+
52
+
53
+
54
if __name__ == "__main__":
    args = parse_args()

    # Load the transformer backbone and move it to the GPU in bfloat16.
    transformer = SubjectGeniusTransformer2DModel.from_pretrained(
        pretrained_model_name_or_path=args.transformer,
    ).to(device=device, dtype=weight_dtype)

    # Attach one condition-LoRA adapter per requested condition type.
    for condition_type in args.condition_types:
        transformer.load_lora_adapter(
            f"{args.condition_lora_dir}/{condition_type}.safetensors",
            adapter_name=condition_type,
        )

    # Build the pipeline without a transformer, then plug in ours.
    pipe = SubjectGeniusPipeline.from_pretrained(
        args.pretrained_model_name_or_path,
        torch_dtype=weight_dtype,
        transformer=None,
    )
    pipe.transformer = transformer

    if args.version == "training-based":
        pipe.transformer.load_lora_adapter(
            args.denoising_lora,
            adapter_name=args.denoising_lora_name,
            use_safetensors=True,
        )
        # BUG FIX: the original hard-coded the weights as [1.0, 1.0, w],
        # which is wrong whenever len(condition_types) != 2. Build the
        # weight list to match the adapter list: 1.0 per condition adapter,
        # then the denoising-LoRA weight.
        pipe.transformer.set_adapters(
            list(args.condition_types) + [args.denoising_lora_name],
            [1.0] * len(args.condition_types) + [args.denoising_lora_weight],
        )
    elif args.version == "training-free":
        pipe.transformer.set_adapters(list(args.condition_types))

    pipe = pipe.to(device)

    # Load conditions.
    # "no_process=True" means there is no need to run the canny or depth
    # extraction or any other preparation for the input conditional images:
    # they are used directly.
    conditions = []
    for condition_type in args.condition_types:
        if condition_type == "subject":
            conditions.append(Condition("subject", raw_img=Image.open(args.subject), no_process=True))
        elif condition_type == "canny":
            conditions.append(Condition("canny", raw_img=Image.open(args.canny), no_process=True))
        elif condition_type == "depth":
            conditions.append(Condition("depth", raw_img=Image.open(args.depth), no_process=True))
        elif condition_type == "fill":
            conditions.append(Condition("fill", raw_img=Image.open(args.fill), no_process=True))
        else:
            raise ValueError("Only support for subject, canny, depth, fill so far.")

    # Load prompt.
    prompt = args.prompt

    if args.seed is not None:
        set_seed(args.seed)

    # FIX: use '-' instead of ':' in the timestamp — ':' is illegal in
    # Windows paths and awkward in shell globs.
    output_dir = os.path.join(args.work_dir, datetime.now().strftime('%y_%m_%d-%H-%M'))
    os.makedirs(output_dir, exist_ok=True)

    # Generate `args.num` images; each output is the conditions and the
    # result laid out side by side: [cond_0 | cond_1 | ... | result].
    for i in range(args.num):
        result_img = pipe(
            prompt=prompt,
            conditions=conditions,
            height=512,
            width=512,
            num_inference_steps=8,
            max_sequence_length=512,
            model_config={},
        ).images[0]

        concat_image = Image.new("RGB", (512 + len(args.condition_types) * 512, 512))
        for j, cond_type in enumerate(args.condition_types):
            cond_image = conditions[j].condition
            if cond_type == "fill":
                # Grey-fill the target bbox, then outline it in gold so the
                # inpainting region is visible in the saved comparison image.
                cond_image = cv2.rectangle(np.array(cond_image), args.json['bbox'][:2], args.json['bbox'][2:], color=(128, 128, 128), thickness=-1)
                cond_image = Image.fromarray(cv2.rectangle(cond_image, args.json['bbox'][:2], args.json['bbox'][2:], color=(255, 215, 0), thickness=2))
            concat_image.paste(cond_image, (j * 512, 0))
        # BUG FIX: the original pasted the result inside the loop above,
        # re-pasting it at every offset and overwriting earlier pastes.
        # Paste it once, after all conditions — the final image is the same,
        # without the redundant overwrites.
        concat_image.paste(result_img, (len(args.condition_types) * 512, 0))
        concat_image.save(os.path.join(output_dir, f"{i}_result.jpg"))
        print(f"Done. Output saved at {output_dir}/{i}_result.jpg")