Commit 1452c34
Parent(s): b948a73

fixed api naming
app.py CHANGED

@@ -20,7 +20,66 @@ import argparse
 from model import CRM
 from inference import generate3d
 
+# Move model initialization into a function that will be called by workers
+def init_model():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--stage1_config",
+        type=str,
+        default="configs/nf7_v3_SNR_rd_size_stroke.yaml",
+        help="config for stage1",
+    )
+    parser.add_argument(
+        "--stage2_config",
+        type=str,
+        default="configs/stage2-v2-snr.yaml",
+        help="config for stage2",
+    )
+    parser.add_argument("--device", type=str, default="cuda")
+    args = parser.parse_args()
+
+    # Download model files
+    crm_path = hf_hub_download(repo_id="Zhengyi/CRM", filename="CRM.pth")
+    specs = json.load(open("configs/specs_objaverse_total.json"))
+    model = CRM(specs)
+    model.load_state_dict(torch.load(crm_path, map_location="cpu"), strict=False)
+    model = model.to(args.device)
+
+    # Load configs
+    stage1_config = OmegaConf.load(args.stage1_config).config
+    stage2_config = OmegaConf.load(args.stage2_config).config
+    stage2_sampler_config = stage2_config.sampler
+    stage1_sampler_config = stage1_config.sampler
+
+    stage1_model_config = stage1_config.models
+    stage2_model_config = stage2_config.models
+
+    xyz_path = hf_hub_download(repo_id="Zhengyi/CRM", filename="ccm-diffusion.pth")
+    pixel_path = hf_hub_download(repo_id="Zhengyi/CRM", filename="pixel-diffusion.pth")
+    stage1_model_config.resume = pixel_path
+    stage2_model_config.resume = xyz_path
+
+    pipeline = TwoStagePipeline(
+        stage1_model_config,
+        stage2_model_config,
+        stage1_sampler_config,
+        stage2_sampler_config,
+        device=args.device,
+        dtype=torch.float32
+    )
+
+    return model, pipeline
+
+# Global variables to store model and pipeline
+model = None
 pipeline = None
+
+def get_model():
+    global model, pipeline
+    if model is None or pipeline is None:
+        model, pipeline = init_model()
+    return model, pipeline
+
 rembg_session = rembg.new_session()
 
 
@@ -36,9 +95,10 @@ def expand_to_square(image, bg_color=(0, 0, 0, 0)):
     return new_image
 
 def check_input_image(input_image):
+    """Check if the input image is valid"""
     if input_image is None:
         raise gr.Error("No image uploaded!")
-
+    return input_image
 
 def remove_background(
     image: PIL.Image.Image,
@@ -78,100 +138,63 @@ def add_background(image, bg_color=(255, 255, 255)):
     background = Image.new("RGBA", image.size, bg_color)
     return Image.alpha_composite(background, image)
 
+def add_random_background(image, color):
+    # Add a random background to the image
+    width, height = image.size
+    background = Image.new("RGBA", image.size, color)
+    return Image.alpha_composite(background, image)
 
-def preprocess_image(
-    """
[...]
-parser.add_argument(
-    "--stage1_config",
-    type=str,
-    default="configs/nf7_v3_SNR_rd_size_stroke.yaml",
-    help="config for stage1",
-)
-parser.add_argument(
-    "--stage2_config",
-    type=str,
-    default="configs/stage2-v2-snr.yaml",
-    help="config for stage2",
-)
-
-parser.add_argument("--device", type=str, default="cuda")
-args = parser.parse_args()
-
-crm_path = hf_hub_download(repo_id="Zhengyi/CRM", filename="CRM.pth")
-specs = json.load(open("configs/specs_objaverse_total.json"))
-model = CRM(specs)
-model.load_state_dict(torch.load(crm_path, map_location="cpu"), strict=False)
-model = model.to(args.device)
-
-stage1_config = OmegaConf.load(args.stage1_config).config
-stage2_config = OmegaConf.load(args.stage2_config).config
-stage2_sampler_config = stage2_config.sampler
-stage1_sampler_config = stage1_config.sampler
-
-stage1_model_config = stage1_config.models
-stage2_model_config = stage2_config.models
-
-xyz_path = hf_hub_download(repo_id="Zhengyi/CRM", filename="ccm-diffusion.pth")
-pixel_path = hf_hub_download(repo_id="Zhengyi/CRM", filename="pixel-diffusion.pth")
-stage1_model_config.resume = pixel_path
-stage2_model_config.resume = xyz_path
-
-pipeline = TwoStagePipeline(
-    stage1_model_config,
-    stage2_model_config,
-    stage1_sampler_config,
-    stage2_sampler_config,
-    device=args.device,
-    dtype=torch.float32
-)
+def preprocess_image(input_image, background_choice, foreground_ratio, back_groud_color):
+    """Preprocess the input image"""
+    try:
+        # Get model and pipeline when needed
+        model, pipeline = get_model()
+
+        # Convert to numpy array
+        np_image = np.array(input_image)
+
+        # Process background
+        if background_choice == "Remove Background":
+            np_image = rembg.remove(np_image, session=rembg_session)
+        elif background_choice == "Custom Background":
+            np_image = add_random_background(np_image, back_groud_color)
+
+        # Resize content if needed
+        if foreground_ratio != 1.0:
+            np_image = do_resize_content(Image.fromarray(np_image), foreground_ratio)
+            np_image = np.array(np_image)
+
+        return Image.fromarray(np_image)
+    except Exception as e:
+        print(f"Error in preprocess_image: {str(e)}")
+        raise e
+
+def gen_image(processed_image, seed, scale, step):
+    """Generate the 3D model"""
+    try:
+        # Get model and pipeline when needed
+        model, pipeline = get_model()
+
+        # Convert to numpy array
+        np_image = np.array(processed_image)
+
+        # Set random seed
+        torch.manual_seed(seed)
+        np.random.seed(seed)
+
+        # Generate images
+        np_imgs, np_xyzs = pipeline.generate(
+            np_image,
+            guidance_scale=scale,
+            num_inference_steps=step
+        )
+
+        # Generate 3D model
+        glb_path = generate3d(model, np_imgs, np_xyzs, args.device)
+        return Image.fromarray(np_imgs), Image.fromarray(np_xyzs), glb_path
+    except Exception as e:
+        print(f"Error in gen_image: {str(e)}")
+        raise e
 
 _DESCRIPTION = '''
 * Our [official implementation](https://github.com/thu-ml/CRM) uses UV texture instead of vertex color. It has better texture than this online demo.
@@ -179,72 +202,75 @@ _DESCRIPTION = '''
 * If you find the output unsatisfying, try using different seeds:)
 '''
 
-# Gradio interface
-with gr.Blocks() as demo:
-    gr.Markdown("# CRM: Single Image to 3D Textured Mesh with Convolutional Reconstruction Model")
+# Create Gradio interface
+with gr.Blocks(title="CRM: 3D Character Generation from Single Image") as demo:
     gr.Markdown(_DESCRIPTION)
 
     with gr.Row():
         with gr.Column():
[...]
+            input_image = gr.Image(label="Input Image", type="pil")
+            background_choice = gr.Radio(
+                choices=["Remove Background", "Custom Background"],
+                value="Remove Background",
+                label="Background Option"
             )
[...]
+            foreground_ratio = gr.Slider(
+                minimum=0.1,
+                maximum=1.0,
+                value=1.0,
+                step=0.1,
+                label="Foreground Ratio"
             )
-
-    with gr.Row():
-        seed = gr.Number(value=1234, label="Seed", precision=0)
-        guidance_scale = gr.Number(value=5.5, minimum=3, maximum=10, label="Guidance scale")
-        step = gr.Number(value=30, minimum=30, maximum=100, label="Sample steps", precision=0)
-
-    generate_btn = gr.Button("Generate 3D shape")
-
-    gr.Examples(
-        examples=[os.path.join("examples", i) for i in os.listdir("examples")],
-        inputs=[image_input],
-        examples_per_page=20
-    )
+            back_groud_color = gr.ColorPicker(
+                label="Background Color",
+                value="#FFFFFF"
+            )
+            seed = gr.Number(
+                label="Seed",
+                value=42,
+                precision=0
+            )
+            scale = gr.Slider(
+                minimum=1.0,
+                maximum=20.0,
+                value=7.5,
+                step=0.1,
+                label="Guidance Scale"
+            )
+            step = gr.Slider(
+                minimum=1,
+                maximum=100,
+                value=50,
+                step=1,
+                label="Steps"
+            )
+            generate_btn = gr.Button("Generate 3D Model")
 
[...]
-        processed = preprocess_image(image, "Auto Remove background", 1.0, "#7F7F7F")
-
-        # Generate the 3D model
-        pipeline.set_seed(seed)
-        rt_dict = pipeline(processed, scale=scale, step=step)
-        stage1_images = rt_dict["stage1_images"]
-        stage2_images = rt_dict["stage2_images"]
-        np_imgs = np.concatenate(stage1_images, 1)
-        np_xyzs = np.concatenate(stage2_images, 1)
-
-        glb_path = generate3d(model, np_imgs, np_xyzs, args.device)
-        return Image.fromarray(np_imgs), Image.fromarray(np_xyzs), glb_path
-
+        with gr.Column():
+            processed_image = gr.Image(label="Processed Image", type="pil")
+            output_image = gr.Image(label="Generated Image", type="pil")
+            output_xyz = gr.Image(label="Generated XYZ", type="pil")
+            output_glb = gr.Model3D(label="Generated 3D Model")
+
+    # Connect the functions with explicit API names
     generate_btn.click(
-        fn=
-        inputs=[
-        outputs=[
+        fn=check_input_image,
+        inputs=[input_image],
+        outputs=[input_image],
+        api_name="check_input_image"
+    ).success(
+        fn=preprocess_image,
+        inputs=[input_image, background_choice, foreground_ratio, back_groud_color],
+        outputs=[processed_image],
+        api_name="preprocess_image"
+    ).success(
+        fn=gen_image,
+        inputs=[processed_image, seed, scale, step],
+        outputs=[output_image, output_xyz, output_glb],
+        api_name="gen_image"
+    )
 
-[...]
+# For Hugging Face Spaces, use minimal configuration
+demo.queue().launch(
+    show_error=True  # Only keep error display for debugging
+)
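With explicit api_name values, the three steps are exposed as stable, named endpoints (/check_input_image, /preprocess_image, /gen_image) instead of auto-generated ones, so external clients no longer break when the event chain changes. Below is a minimal client-side sketch of calling the renamed endpoints with the gradio_client package; the Space id and file names are placeholders, not part of this commit.

# Sketch only: the Space id and input file are placeholders.
from gradio_client import Client, handle_file

client = Client("user/CRM-demo")  # placeholder Space id

# Endpoint named by this commit: api_name="preprocess_image"
processed_path = client.predict(
    handle_file("input.png"),   # input_image
    "Remove Background",        # background_choice
    1.0,                        # foreground_ratio
    "#FFFFFF",                  # back_groud_color (spelled as in app.py)
    api_name="/preprocess_image",
)

# Endpoint named by this commit: api_name="gen_image"
image_path, xyz_path, glb_path = client.predict(
    handle_file(processed_path),  # processed_image
    42,                           # seed
    7.5,                          # scale (guidance)
    50,                           # step (sampling steps)
    api_name="/gen_image",
)
print(glb_path)  # local path to the generated .glb file

client.predict downloads file outputs and returns local paths, which is why the processed image from the first call can be passed straight into the second.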
|