bhartiyashesh committed on
Commit
96975ba
·
1 Parent(s): f0d7271

Deploy Interior AI Designer

Browse files
Files changed (3) hide show
  1. README.md +60 -9
  2. app.py +199 -397
  3. requirements.txt +12 -13
README.md CHANGED
@@ -1,14 +1,65 @@
1
  ---
2
  title: Interior AI Designer
3
  emoji: 🏠
4
- colorFrom: green
5
- colorTo: blue
6
  sdk: gradio
7
- sdk_version: 4.37.2
8
  app_file: app.py
9
- header: mini
10
- theme: bethecloud/storj_theme
11
- pinned: true
12
- license: apache-2.0
13
- short_description: Ikea could never
14
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  title: Interior AI Designer
3
  emoji: 🏠
4
+ colorFrom: blue
5
+ colorTo: purple
6
  sdk: gradio
7
+ sdk_version: 5.29.0
8
  app_file: app.py
9
+ pinned: false
10
+ license: mit
11
+ ---
12
+
13
+ # 🏠 Interior AI Designer
14
+
15
+ Transform any room photo into beautiful interior designs with AI!
16
+
17
+ ## Features
18
+
19
+ - **16 Design Styles**: From Minimalistic to Japanese to Cyberpunk
20
+ - **Custom Prompts**: Add specific elements to your design
21
+ - **Public API**: Call programmatically from any application
22
+
23
+ ## Quick Start
24
+
25
+ 1. Upload a room photo
26
+ 2. Select a design style
27
+ 3. Click "Redesign Interior"
28
+ 4. Download your transformed image!
29
+
30
+ ## API Usage
31
+
32
+ ```python
33
+ from gradio_client import Client
34
+
35
+ client = Client("YOUR_USERNAME/interior-ai-designer")
36
+ result = client.predict(
37
+ image="room.jpg",
38
+ style="Scandinavian",
39
+ api_name="/redesign"
40
+ )
41
+ ```
42
+
43
+ ## Available Styles
44
+
45
+ - Minimalistic
46
+ - Boho
47
+ - Farmhouse
48
+ - Japanese
49
+ - Scandinavian
50
+ - Parisian
51
+ - Hollywood
52
+ - Beach
53
+ - Matrix
54
+ - And more...
55
+
56
+ ## Technology
57
+
58
+ - Stable Diffusion 1.5
59
+ - ControlNet (NormalBae)
60
+ - Gradio
61
+
62
+ ---
63
+
64
+ Made with ❤️ using Hugging Face Spaces
65
+
app.py CHANGED
@@ -1,39 +1,29 @@
1
- prod = False
2
- port = 8080
3
- show_options = False
4
- if prod:
5
- port = 8081
6
- # show_options = False
7
 
8
  import os
9
  import random
10
  import time
11
  import gradio as gr
12
  import numpy as np
13
- import spaces
14
- import imageio
15
- from huggingface_hub import HfApi
16
- import gc
17
  import torch
18
- import cv2
19
  from PIL import Image
20
  from diffusers import (
21
  ControlNetModel,
22
  DPMSolverMultistepScheduler,
23
  StableDiffusionControlNetPipeline,
24
- # StableDiffusionInpaintPipeline,
25
- # AutoencoderKL,
26
  )
27
  from controlnet_aux_local import NormalBaeDetector
28
 
29
  MAX_SEED = np.iinfo(np.int32).max
30
- API_KEY = os.environ.get("API_KEY", None)
31
- # os.environ['HF_HOME'] = '/data/.huggingface'
32
 
33
- print("CUDA version:", torch.version.cuda)
34
- print("loading everything")
35
- compiled = False
36
- api = HfApi()
37
 
38
  class Preprocessor:
39
  MODEL_ID = "lllyasviel/Annotators"
@@ -51,26 +41,26 @@ class Preprocessor:
51
  torch.cuda.empty_cache()
52
  self.name = name
53
  else:
54
- raise ValueError
55
- return
56
 
57
  def __call__(self, image: Image.Image, **kwargs) -> Image.Image:
58
  return self.model(image, **kwargs)
59
 
 
 
60
  if gr.NO_RELOAD:
61
- # Controlnet Normal
 
 
 
62
  model_id = "lllyasviel/control_v11p_sd15_normalbae"
63
- print("initializing controlnet")
64
  controlnet = ControlNetModel.from_pretrained(
65
  model_id,
66
  torch_dtype=torch.float16,
67
- attn_implementation="flash_attention_2",
68
  ).to("cuda")
69
 
70
  # Scheduler
71
  scheduler = DPMSolverMultistepScheduler.from_pretrained(
72
- # "runwayml/stable-diffusion-v1-5",
73
- # "stable-diffusion-v1-5/stable-diffusion-v1-5",
74
  "ashllay/stable-diffusion-v1-5-archive",
75
  solver_order=2,
76
  subfolder="scheduler",
@@ -80,372 +70,126 @@ if gr.NO_RELOAD:
80
  prediction_type="epsilon",
81
  thresholding=False,
82
  denoise_final=True,
83
- device_map="cuda",
84
  torch_dtype=torch.float16,
85
  )
86
 
87
- # Stable Diffusion Pipeline URL
88
- # base_model_url = "https://huggingface.co/broyang/hentaidigitalart_v20/blob/main/realcartoon3d_v15.safetensors"
89
  base_model_url = "https://huggingface.co/Lykon/AbsoluteReality/blob/main/AbsoluteReality_1.8.1_pruned.safetensors"
90
- # vae_url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors"
91
-
92
- # print('loading vae')
93
- # vae = AutoencoderKL.from_single_file(vae_url, torch_dtype=torch.float16).to("cuda")
94
- # vae.to(memory_format=torch.channels_last)
95
-
96
- print('loading pipe')
97
  pipe = StableDiffusionControlNetPipeline.from_single_file(
98
  base_model_url,
99
  safety_checker=None,
100
  controlnet=controlnet,
101
  scheduler=scheduler,
102
- # vae=vae,
103
  torch_dtype=torch.float16,
104
  ).to("cuda")
105
-
106
- # print('loading inpainting pipe')
107
- # inpaint_pipe = StableDiffusionInpaintPipeline.from_pretrained(
108
- # "runwayml/stable-diffusion-inpainting",
109
- # torch_dtype=torch.float16,
110
- # ).to("cuda")
111
 
112
- print("loading preprocessor")
113
  preprocessor = Preprocessor()
114
  preprocessor.load("NormalBae")
115
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="EasyNegativeV2.safetensors", token="EasyNegativeV2",)
116
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="badhandv4.pt", token="badhandv4")
117
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="fcNeg-neg.pt", token="fcNeg-neg")
118
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_Ahegao.pt", token="HDA_Ahegao")
119
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_Bondage.pt", token="HDA_Bondage")
120
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_pet_play.pt", token="HDA_pet_play")
121
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_unconventional maid.pt", token="HDA_unconventional_maid")
122
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_NakedHoodie.pt", token="HDA_NakedHoodie")
123
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_NunDress.pt", token="HDA_NunDress")
124
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_Shibari.pt", token="HDA_Shibari")
125
- pipe.to("cuda")
126
 
127
- print("---------------Loaded controlnet pipeline---------------")
 
 
 
 
 
 
 
 
128
  torch.cuda.empty_cache()
129
  gc.collect()
130
- print(f"CUDA memory allocated: {torch.cuda.max_memory_allocated(device='cuda') / 1e9:.2f} GB")
131
- print("Model Compiled!")
132
-
133
- def generate_furniture_mask(image, furniture_type):
134
- image_np = np.array(image)
135
- height, width = image_np.shape[:2]
136
-
137
- mask = np.zeros((height, width), dtype=np.uint8)
138
-
139
- if furniture_type == "sofa":
140
- cv2.rectangle(mask, (width//4, int(height*0.6)), (width*3//4, height), 255, -1)
141
- elif furniture_type == "table":
142
- cv2.rectangle(mask, (width//3, height//3), (width*2//3, height*2//3), 255, -1)
143
- elif furniture_type == "chair":
144
- cv2.circle(mask, (width*3//5, height*2//3), height//6, 255, -1)
145
-
146
- return Image.fromarray(mask)
147
 
148
- def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
149
- if randomize_seed:
150
- seed = random.randint(0, MAX_SEED)
151
- return seed
152
 
153
- def get_additional_prompt():
154
- prompt = "hyperrealistic photography,extremely detailed,(intricate details),unity 8k wallpaper,ultra detailed"
155
- top = ["tank top", "blouse", "button up shirt", "sweater", "corset top"]
156
- bottom = ["short skirt", "athletic shorts", "jean shorts", "pleated skirt", "short skirt", "leggings", "high-waisted shorts"]
157
- accessory = ["knee-high boots", "gloves", "Thigh-high stockings", "Garter belt", "choker", "necklace", "headband", "headphones"]
158
- return f"{prompt}, {random.choice(top)}, {random.choice(bottom)}, {random.choice(accessory)}, score_9"
159
- # outfit = ["schoolgirl outfit", "playboy outfit", "red dress", "gala dress", "cheerleader outfit", "nurse outfit", "Kimono"]
160
 
161
- def get_prompt(prompt, additional_prompt):
162
- interior = "design-style interior designed (interior space),tungsten white balance,captured with a DSLR camera using f/10 aperture, 1/60 sec shutter speed, ISO 400, 20mm focal length"
163
- default = "hyperrealistic photography,extremely detailed,(intricate details),unity 8k wallpaper,ultra detailed"
164
- default2 = f"professional 3d model {prompt},octane render,highly detailed,volumetric,dramatic lighting,hyperrealistic photography,extremely detailed,(intricate details),unity 8k wallpaper,ultra detailed"
165
- randomize = get_additional_prompt()
166
- # nude = "NSFW,((nude)),medium bare breasts,hyperrealistic photography,extremely detailed,(intricate details),unity 8k wallpaper,ultra detailed"
167
- # bodypaint = "((fully naked with no clothes)),nude naked seethroughxray,invisiblebodypaint,rating_newd,NSFW"
168
- lab_girl = "hyperrealistic photography, extremely detailed, shy assistant wearing minidress boots and gloves, laboratory background, score_9, 1girl"
169
- pet_play = "hyperrealistic photography, extremely detailed, playful, blush, glasses, collar, score_9, HDA_pet_play"
170
- bondage = "hyperrealistic photography, extremely detailed, submissive, glasses, score_9, HDA_Bondage"
171
- # ahegao = "((invisible clothing)), hyperrealistic photography,exposed vagina,sexy,nsfw,HDA_Ahegao"
172
- ahegao2 = "(invisiblebodypaint),rating_newd,HDA_Ahegao"
173
- athleisure = "hyperrealistic photography, extremely detailed, 1girl athlete, exhausted embarrassed sweaty,outdoors, ((athleisure clothing)), score_9"
174
- atompunk = "((atompunk world)), hyperrealistic photography, extremely detailed, short hair, bodysuit, glasses, neon cyberpunk background, score_9"
175
- maid = "hyperrealistic photography, extremely detailed, shy, blushing, score_9, pastel background, HDA_unconventional_maid"
176
- nundress = "hyperrealistic photography, extremely detailed, shy, blushing, fantasy background, score_9, HDA_NunDress"
177
- naked_hoodie = "hyperrealistic photography, extremely detailed, medium hair, cityscape, (neon lights), score_9, HDA_NakedHoodie"
178
- abg = "(1girl, asian body covered in words, words on body, tattoos of (words) on body),(masterpiece, best quality),medium breasts,(intricate details),unity 8k wallpaper,ultra detailed,(pastel colors),beautiful and aesthetic,see-through (clothes),detailed,solo"
179
- # shibari = "extremely detailed, hyperrealistic photography, earrings, blushing, lace choker, tattoo, medium hair, score_9, HDA_Shibari"
180
- shibari2 = "octane render, highly detailed, volumetric, HDA_Shibari"
181
-
182
- if prompt == "":
183
- girls = [randomize, pet_play, bondage, lab_girl, athleisure, atompunk, maid, nundress, naked_hoodie, abg, shibari2, ahegao2]
184
- prompts_nsfw = [abg, shibari2, ahegao2]
185
- prompt = f"{random.choice(girls)}"
186
- prompt = f"boho chic"
187
- # print(f"-------------{preset}-------------")
188
- else:
189
- prompt = f"Photo from Pinterest of {prompt} {interior}"
190
- # prompt = default2
191
- return f"{prompt} f{additional_prompt}"
192
 
193
- style_list = [
194
- {
195
- "name": "None",
196
- "prompt": ""
197
- },
198
- {
199
- "name": "Minimalistic",
200
- "prompt": "Minimalist interior design,clean lines,neutral colors,uncluttered space,functional furniture,lots of natural light"
201
- },
202
- {
203
- "name": "Boho",
204
- "prompt": "Bohemian chic interior,eclectic mix of patterns and textures,vintage furniture,plants,woven textiles,warm earthy colors"
205
- },
206
- {
207
- "name": "Farmhouse",
208
- "prompt": "Modern farmhouse interior,rustic wood elements,shiplap walls,neutral color palette,industrial accents,cozy textiles"
209
- },
210
- {
211
- "name": "Saudi Prince",
212
- "prompt": "Opulent gold interior,luxurious ornate furniture,crystal chandeliers,rich fabrics,marble floors,intricate Arabic patterns"
213
- },
214
- {
215
- "name": "Neoclassical",
216
- "prompt": "Neoclassical interior design,elegant columns,ornate moldings,symmetrical layout,refined furniture,muted color palette"
217
- },
218
- {
219
- "name": "Eclectic",
220
- "prompt": "Eclectic interior design,mix of styles and eras,bold color combinations,diverse furniture pieces,unique art objects"
221
- },
222
- {
223
- "name": "Parisian",
224
- "prompt": "Parisian apartment interior,all-white color scheme,ornate moldings,herringbone wood floors,elegant furniture,large windows"
225
- },
226
- {
227
- "name": "Hollywood",
228
- "prompt": "Hollywood Regency interior,glamorous and luxurious,bold colors,mirrored surfaces,velvet upholstery,gold accents"
229
- },
230
- {
231
- "name": "Scandinavian",
232
- "prompt": "Scandinavian interior design,light wood tones,white walls,minimalist furniture,cozy textiles,hygge atmosphere"
233
- },
234
- {
235
- "name": "Beach",
236
- "prompt": "Coastal beach house interior,light blue and white color scheme,weathered wood,nautical accents,sheer curtains,ocean view"
237
- },
238
- {
239
- "name": "Japanese",
240
- "prompt": "Traditional Japanese interior,tatami mats,shoji screens,low furniture,zen garden view,minimalist decor,natural materials"
241
- },
242
- {
243
- "name": "Midcentury Modern",
244
- "prompt": "Mid-century modern interior,1950s-60s style furniture,organic shapes,warm wood tones,bold accent colors,large windows"
245
- },
246
- {
247
- "name": "Retro Futurism",
248
- "prompt": "Neon (atompunk world) retro cyberpunk background",
249
- },
250
- {
251
- "name": "Texan",
252
- "prompt": "Western cowboy interior,rustic wood beams,leather furniture,cowhide rugs,antler chandeliers,southwestern patterns"
253
- },
254
- {
255
- "name": "Matrix",
256
- "prompt": "Futuristic cyberpunk interior,neon accent lighting,holographic plants,sleek black surfaces,advanced gaming setup,transparent screens,Blade Runner inspired decor,high-tech minimalist furniture"
257
- }
258
- ]
259
 
260
- styles = {k["name"]: (k["prompt"]) for k in style_list}
261
- STYLE_NAMES = list(styles.keys())
262
 
263
- def apply_style(style_name):
264
- if style_name in styles:
265
- p = styles.get(style_name, "none")
266
- return p
267
 
 
 
 
 
 
 
 
 
 
 
 
 
 
268
 
269
- css = """
270
- h1, h2, h3 {
271
- text-align: center;
272
- display: block;
273
- }
274
- footer {
275
- visibility: hidden;
276
- }
277
- .gradio-container {
278
- max-width: 1100px !important;
279
- }
280
- .gr-image {
281
- display: flex;
282
- justify-content: center;
283
- align-items: center;
284
- width: 100%;
285
- height: 512px;
286
- overflow: hidden;
287
- }
288
- .gr-image img {
289
- width: 100%;
290
- height: 100%;
291
- object-fit: cover;
292
- object-position: center;
293
- }
294
- """
295
- with gr.Blocks(theme="bethecloud/storj_theme", css=css) as demo:
296
- #############################################################################
297
- with gr.Row():
298
- with gr.Accordion("Advanced options", open=show_options, visible=show_options):
299
- num_images = gr.Slider(
300
- label="Images", minimum=1, maximum=4, value=1, step=1
301
- )
302
- image_resolution = gr.Slider(
303
- label="Image resolution",
304
- minimum=256,
305
- maximum=1024,
306
- value=1024,
307
- step=256,
308
- )
309
- preprocess_resolution = gr.Slider(
310
- label="Preprocess resolution",
311
- minimum=128,
312
- maximum=1024,
313
- value=1024,
314
- step=1,
315
- )
316
- num_steps = gr.Slider(
317
- label="Number of steps", minimum=1, maximum=100, value=15, step=1
318
- ) # 20/4.5 or 12 without lora, 4 with lora
319
- guidance_scale = gr.Slider(
320
- label="Guidance scale", minimum=0.1, maximum=30.0, value=5.5, step=0.1
321
- ) # 5 without lora, 2 with lora
322
- seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
323
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
324
- a_prompt = gr.Textbox(
325
- label="Additional prompt",
326
- value = "design-style interior designed (interior space), tungsten white balance, captured with a DSLR camera using f/10 aperture, 1/60 sec shutter speed, ISO 400, 20mm focal length"
327
- )
328
- n_prompt = gr.Textbox(
329
- label="Negative prompt",
330
- value="EasyNegativeV2, fcNeg, (badhandv4:1.4), (worst quality, low quality, bad quality, normal quality:2.0), (bad hands, missing fingers, extra fingers:2.0)",
331
- )
332
- #############################################################################
333
- # input text
334
- with gr.Column():
335
- prompt = gr.Textbox(
336
- label="Custom Design",
337
- placeholder="Enter a description (optional)",
338
- )
339
- # design options
340
- with gr.Row(visible=True):
341
- style_selection = gr.Radio(
342
- show_label=True,
343
- container=True,
344
- interactive=True,
345
- choices=STYLE_NAMES,
346
- value="None",
347
- label="Design Styles",
348
- )
349
- # input image
350
- with gr.Row(equal_height=True):
351
- with gr.Column(scale=1, min_width=300):
352
- image = gr.Image(
353
- label="Input",
354
- sources=["upload"],
355
- show_label=True,
356
- mirror_webcam=True,
357
- type="pil",
358
- )
359
- # run button
360
- with gr.Column():
361
- run_button = gr.Button(value="Use this one", size="lg", visible=False)
362
- # output image
363
- with gr.Column(scale=1, min_width=300):
364
- result = gr.Image(
365
- label="Output",
366
- interactive=False,
367
- type="pil",
368
- show_share_button= False,
369
- )
370
- # Use this image button
371
- with gr.Column():
372
- use_ai_button = gr.Button(value="Use this one", size="lg", visible=False)
373
- config = [
374
- image,
375
- style_selection,
376
- prompt,
377
- a_prompt,
378
- n_prompt,
379
- num_images,
380
- image_resolution,
381
- preprocess_resolution,
382
- num_steps,
383
- guidance_scale,
384
- seed,
385
- ]
386
-
387
- with gr.Row():
388
- helper_text = gr.Markdown("## Tap and hold (on mobile) to save the image.", visible=True)
389
-
390
- # image processing
391
- @gr.on(triggers=[image.upload, prompt.submit, run_button.click], inputs=config, outputs=result, show_progress="minimal")
392
- def auto_process_image(image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
393
- return process_image(image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed)
394
-
395
- # AI image processing
396
- @gr.on(triggers=[use_ai_button.click], inputs=[result] + config, outputs=[image, result], show_progress="minimal")
397
- def submit(previous_result, image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
398
- # First, yield the previous result to update the input image immediately
399
- yield previous_result, gr.update()
400
- # Then, process the new input image
401
- new_result = process_image(previous_result, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed)
402
- # Finally, yield the new result
403
- yield previous_result, new_result
404
-
405
- # Turn off buttons when processing
406
- @gr.on(triggers=[image.upload, use_ai_button.click, run_button.click], inputs=None, outputs=[run_button, use_ai_button], show_progress="hidden")
407
- def turn_buttons_off():
408
- return gr.update(visible=False), gr.update(visible=False)
409
 
410
- # Turn on buttons when processing is complete
411
- @gr.on(triggers=[result.change], inputs=None, outputs=[use_ai_button, run_button], show_progress="hidden")
412
- def turn_buttons_on():
413
- return gr.update(visible=True), gr.update(visible=True)
414
-
415
- @spaces.GPU(duration=12)
416
- @torch.inference_mode()
417
- def process_image(
418
- image,
419
- style_selection,
420
- prompt,
421
- a_prompt,
422
- n_prompt,
423
- num_images,
424
- image_resolution,
425
- preprocess_resolution,
426
- num_steps,
427
- guidance_scale,
428
- seed,
429
- ):
430
- seed = random.randint(0, MAX_SEED)
431
  generator = torch.cuda.manual_seed(seed)
432
 
 
433
  preprocessor.load("NormalBae")
434
  control_image = preprocessor(
435
  image=image,
436
  image_resolution=image_resolution,
437
- detect_resolution=preprocess_resolution,
438
  )
439
 
440
- if style_selection is not None and style_selection != "None":
441
- prompt = f"Photo from Pinterest of {apply_style(style_selection)} {prompt},{a_prompt}"
 
 
 
 
 
442
  else:
443
- prompt = str(get_prompt(prompt, a_prompt))
444
- negative_prompt = str(n_prompt)
445
- print(prompt)
446
 
447
- # Generate the initial room image
448
- initial_result = pipe(
 
 
 
 
449
  prompt=prompt,
450
  negative_prompt=negative_prompt,
451
  guidance_scale=guidance_scale,
@@ -455,51 +199,109 @@ def process_image(
455
  image=control_image,
456
  ).images[0]
457
 
458
- # # Randomly choose whether to add furniture and which type
459
- # furniture_types = ["sofa", "table", "chair", "dresser", "bookshelf", "desk", "coffee table"]
460
- # furniture_type = random.choice(furniture_types)
461
 
 
 
 
 
 
 
 
 
 
 
462
 
463
- # furniture_mask = generate_furniture_mask(initial_result, furniture_type)
464
- # furniture_prompt = f"{prompt}, with a {furniture_type} in the style of {style_selection}"
465
- # print(furniture_prompt)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
466
 
467
- # # Use the inpainting pipeline to add furniture
468
- # final_result = inpaint_pipe(
469
- # prompt=furniture_prompt,
470
- # image=initial_result,
471
- # mask_image=furniture_mask,
472
- # negative_prompt=negative_prompt,
473
- # num_inference_steps=num_steps,
474
- # guidance_scale=guidance_scale,
475
- # generator=generator,
476
- # ).images[0]
 
 
477
 
478
- # Save and upload results
479
- timestamp = int(time.time())
480
- img_path = f"{timestamp}.jpg"
481
- results_path = f"{timestamp}_out.jpg"
482
- imageio.imsave(img_path, image)
483
- imageio.imsave(results_path, initial_result)
484
- api.upload_file(
485
- path_or_fileobj=img_path,
486
- path_in_repo=img_path,
487
- repo_id="broyang/interior-ai-outputs",
488
- repo_type="dataset",
489
- token=API_KEY,
490
- run_as_future=True,
491
  )
492
- api.upload_file(
493
- path_or_fileobj=results_path,
494
- path_in_repo=results_path,
495
- repo_id="broyang/interior-ai-outputs",
496
- repo_type="dataset",
497
- token=API_KEY,
498
- run_as_future=True,
 
 
 
 
 
 
 
 
 
 
 
 
 
499
  )
500
- return initial_result
 
 
 
501
 
502
- if prod:
503
- demo.queue(max_size=20).launch(server_name="localhost", server_port=port)
504
- else:
505
- demo.queue(api_open=False).launch(show_api=False)
 
 
 
1
+ """
2
+ Interior AI Designer API
3
+ Hugging Face Spaces deployment with public API endpoint
4
+ """
 
 
5
 
6
  import os
7
  import random
8
  import time
9
  import gradio as gr
10
  import numpy as np
11
+ import spaces # Required for Hugging Face Spaces GPU
 
 
 
12
  import torch
13
+ import gc
14
  from PIL import Image
15
  from diffusers import (
16
  ControlNetModel,
17
  DPMSolverMultistepScheduler,
18
  StableDiffusionControlNetPipeline,
 
 
19
  )
20
  from controlnet_aux_local import NormalBaeDetector
21
 
22
  MAX_SEED = np.iinfo(np.int32).max
 
 
23
 
24
+ # ============================================================
25
+ # Model Loading (runs once at startup)
26
+ # ============================================================
 
27
 
28
  class Preprocessor:
29
  MODEL_ID = "lllyasviel/Annotators"
 
41
  torch.cuda.empty_cache()
42
  self.name = name
43
  else:
44
+ raise ValueError(f"Unknown preprocessor: {name}")
 
45
 
46
  def __call__(self, image: Image.Image, **kwargs) -> Image.Image:
47
  return self.model(image, **kwargs)
48
 
49
+
50
+ # Load models at startup
51
  if gr.NO_RELOAD:
52
+ print("CUDA version:", torch.version.cuda)
53
+ print("Loading models...")
54
+
55
+ # ControlNet
56
  model_id = "lllyasviel/control_v11p_sd15_normalbae"
 
57
  controlnet = ControlNetModel.from_pretrained(
58
  model_id,
59
  torch_dtype=torch.float16,
 
60
  ).to("cuda")
61
 
62
  # Scheduler
63
  scheduler = DPMSolverMultistepScheduler.from_pretrained(
 
 
64
  "ashllay/stable-diffusion-v1-5-archive",
65
  solver_order=2,
66
  subfolder="scheduler",
 
70
  prediction_type="epsilon",
71
  thresholding=False,
72
  denoise_final=True,
 
73
  torch_dtype=torch.float16,
74
  )
75
 
76
+ # Stable Diffusion Pipeline
 
77
  base_model_url = "https://huggingface.co/Lykon/AbsoluteReality/blob/main/AbsoluteReality_1.8.1_pruned.safetensors"
78
+
 
 
 
 
 
 
79
  pipe = StableDiffusionControlNetPipeline.from_single_file(
80
  base_model_url,
81
  safety_checker=None,
82
  controlnet=controlnet,
83
  scheduler=scheduler,
 
84
  torch_dtype=torch.float16,
85
  ).to("cuda")
 
 
 
 
 
 
86
 
87
+ # Preprocessor
88
  preprocessor = Preprocessor()
89
  preprocessor.load("NormalBae")
 
 
 
 
 
 
 
 
 
 
 
90
 
91
+ # Optional: Load textual inversions for better negative prompts
92
+ try:
93
+ pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="EasyNegativeV2.safetensors", token="EasyNegativeV2")
94
+ pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="badhandv4.pt", token="badhandv4")
95
+ pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="fcNeg-neg.pt", token="fcNeg-neg")
96
+ except Exception as e:
97
+ print(f"Could not load textual inversions: {e}")
98
+
99
+ pipe.to("cuda")
100
  torch.cuda.empty_cache()
101
  gc.collect()
102
+ print(f"Models loaded! CUDA memory: {torch.cuda.max_memory_allocated(device='cuda') / 1e9:.2f} GB")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
103
 
 
 
 
 
104
 
105
+ # ============================================================
106
+ # Style Definitions
107
+ # ============================================================
 
 
 
 
108
 
109
+ STYLE_LIST = [
110
+ {"name": "None", "prompt": ""},
111
+ {"name": "Minimalistic", "prompt": "Minimalist interior design,clean lines,neutral colors,uncluttered space,functional furniture,lots of natural light"},
112
+ {"name": "Boho", "prompt": "Bohemian chic interior,eclectic mix of patterns and textures,vintage furniture,plants,woven textiles,warm earthy colors"},
113
+ {"name": "Farmhouse", "prompt": "Modern farmhouse interior,rustic wood elements,shiplap walls,neutral color palette,industrial accents,cozy textiles"},
114
+ {"name": "Saudi Prince", "prompt": "Opulent gold interior,luxurious ornate furniture,crystal chandeliers,rich fabrics,marble floors,intricate Arabic patterns"},
115
+ {"name": "Neoclassical", "prompt": "Neoclassical interior design,elegant columns,ornate moldings,symmetrical layout,refined furniture,muted color palette"},
116
+ {"name": "Eclectic", "prompt": "Eclectic interior design,mix of styles and eras,bold color combinations,diverse furniture pieces,unique art objects"},
117
+ {"name": "Parisian", "prompt": "Parisian apartment interior,all-white color scheme,ornate moldings,herringbone wood floors,elegant furniture,large windows"},
118
+ {"name": "Hollywood", "prompt": "Hollywood Regency interior,glamorous and luxurious,bold colors,mirrored surfaces,velvet upholstery,gold accents"},
119
+ {"name": "Scandinavian", "prompt": "Scandinavian interior design,light wood tones,white walls,minimalist furniture,cozy textiles,hygge atmosphere"},
120
+ {"name": "Beach", "prompt": "Coastal beach house interior,light blue and white color scheme,weathered wood,nautical accents,sheer curtains,ocean view"},
121
+ {"name": "Japanese", "prompt": "Traditional Japanese interior,tatami mats,shoji screens,low furniture,zen garden view,minimalist decor,natural materials"},
122
+ {"name": "Midcentury Modern", "prompt": "Mid-century modern interior,1950s-60s style furniture,organic shapes,warm wood tones,bold accent colors,large windows"},
123
+ {"name": "Retro Futurism", "prompt": "Neon (atompunk world) retro cyberpunk background"},
124
+ {"name": "Texan", "prompt": "Western cowboy interior,rustic wood beams,leather furniture,cowhide rugs,antler chandeliers,southwestern patterns"},
125
+ {"name": "Matrix", "prompt": "Futuristic cyberpunk interior,neon accent lighting,holographic plants,sleek black surfaces,advanced gaming setup,transparent screens,Blade Runner inspired decor,high-tech minimalist furniture"},
126
+ ]
 
 
 
 
 
 
 
 
 
 
 
 
 
127
 
128
+ STYLES = {s["name"]: s["prompt"] for s in STYLE_LIST}
129
+ STYLE_NAMES = list(STYLES.keys())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
130
 
 
 
131
 
132
+ # ============================================================
133
+ # Core Processing Function (API Endpoint)
134
+ # ============================================================
 
135
 
136
+ @spaces.GPU(duration=20)
137
+ @torch.inference_mode()
138
+ def redesign_interior(
139
+ image: Image.Image,
140
+ style: str = "Minimalistic",
141
+ custom_prompt: str = "",
142
+ num_steps: int = 15,
143
+ guidance_scale: float = 5.5,
144
+ seed: int = -1,
145
+ image_resolution: int = 768,
146
+ ) -> Image.Image:
147
+ """
148
+ Redesign an interior image with the specified style.
149
 
150
+ Args:
151
+ image: Input room/interior image (PIL Image)
152
+ style: Design style name (e.g., "Minimalistic", "Boho", "Japanese")
153
+ custom_prompt: Additional custom prompt to append
154
+ num_steps: Number of inference steps (default: 15)
155
+ guidance_scale: Guidance scale for generation (default: 5.5)
156
+ seed: Random seed (-1 for random)
157
+ image_resolution: Output image resolution (default: 768)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
158
 
159
+ Returns:
160
+ Redesigned interior image (PIL Image)
161
+ """
162
+ # Set seed
163
+ if seed == -1:
164
+ seed = random.randint(0, MAX_SEED)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
165
  generator = torch.cuda.manual_seed(seed)
166
 
167
+ # Preprocess image with NormalBae
168
  preprocessor.load("NormalBae")
169
  control_image = preprocessor(
170
  image=image,
171
  image_resolution=image_resolution,
172
+ detect_resolution=image_resolution,
173
  )
174
 
175
+ # Build prompt
176
+ base_prompt = "Photo from Pinterest of"
177
+ style_prompt = STYLES.get(style, "")
178
+ additional_prompt = "design-style interior designed (interior space), tungsten white balance, captured with a DSLR camera using f/10 aperture, 1/60 sec shutter speed, ISO 400, 20mm focal length"
179
+
180
+ if style_prompt:
181
+ prompt = f"{base_prompt} {style_prompt} {custom_prompt}, {additional_prompt}"
182
  else:
183
+ prompt = f"{base_prompt} {custom_prompt}, {additional_prompt}" if custom_prompt else f"boho chic interior, {additional_prompt}"
184
+
185
+ negative_prompt = "EasyNegativeV2, fcNeg, (badhandv4:1.4), (worst quality, low quality, bad quality, normal quality:2.0), (bad hands, missing fingers, extra fingers:2.0)"
186
 
187
+ print(f"Prompt: {prompt}")
188
+ print(f"Style: {style}, Seed: {seed}")
189
+
190
+ # Generate
191
+ start = time.time()
192
+ result = pipe(
193
  prompt=prompt,
194
  negative_prompt=negative_prompt,
195
  guidance_scale=guidance_scale,
 
199
  image=control_image,
200
  ).images[0]
201
 
202
+ print(f"Generation completed in {time.time() - start:.2f}s")
203
+ torch.cuda.empty_cache()
 
204
 
205
+ return result
206
+
207
+
208
+ # ============================================================
209
+ # Gradio Interface
210
+ # ============================================================
211
+
212
+ with gr.Blocks() as demo:
213
+ gr.Markdown("# 🏠 Interior AI Designer")
214
+ gr.Markdown("Upload a room photo and select a design style to reimagine your space!")
215
 
216
+ with gr.Row():
217
+ with gr.Column():
218
+ input_image = gr.Image(
219
+ label="Upload Room Image",
220
+ type="pil",
221
+ sources=["upload", "clipboard"],
222
+ )
223
+ style_dropdown = gr.Dropdown(
224
+ label="Design Style",
225
+ choices=STYLE_NAMES,
226
+ value="Minimalistic",
227
+ )
228
+ custom_prompt = gr.Textbox(
229
+ label="Custom Prompt (optional)",
230
+ placeholder="Add specific details like 'with plants' or 'blue accents'",
231
+ )
232
+
233
+ with gr.Accordion("Advanced Options", open=False):
234
+ num_steps = gr.Slider(
235
+ label="Inference Steps",
236
+ minimum=10, maximum=50, value=15, step=1,
237
+ )
238
+ guidance_scale = gr.Slider(
239
+ label="Guidance Scale",
240
+ minimum=1.0, maximum=20.0, value=5.5, step=0.5,
241
+ )
242
+ seed = gr.Slider(
243
+ label="Seed (-1 for random)",
244
+ minimum=-1, maximum=MAX_SEED, value=-1, step=1,
245
+ )
246
+ image_resolution = gr.Slider(
247
+ label="Resolution",
248
+ minimum=512, maximum=1024, value=768, step=128,
249
+ )
250
+
251
+ generate_btn = gr.Button("🎨 Redesign Interior", variant="primary", size="lg")
252
+
253
+ with gr.Column():
254
+ output_image = gr.Image(label="Redesigned Interior", type="pil")
255
 
256
+ # Examples
257
+ gr.Examples(
258
+ examples=[
259
+ ["Minimalistic"],
260
+ ["Boho"],
261
+ ["Japanese"],
262
+ ["Scandinavian"],
263
+ ["Matrix"],
264
+ ],
265
+ inputs=[style_dropdown],
266
+ label="Try these styles",
267
+ )
268
 
269
+ # Connect the button to the function
270
+ generate_btn.click(
271
+ fn=redesign_interior,
272
+ inputs=[input_image, style_dropdown, custom_prompt, num_steps, guidance_scale, seed, image_resolution],
273
+ outputs=output_image,
274
+ api_name="redesign", # This enables the API endpoint
 
 
 
 
 
 
 
275
  )
276
+
277
+ gr.Markdown("""
278
+ ---
279
+ ### 📡 API Usage
280
+
281
+ This app exposes a public API! You can call it programmatically:
282
+
283
+ ```python
284
+ from gradio_client import Client
285
+
286
+ client = Client("YOUR_USERNAME/interior-ai-designer")
287
+ result = client.predict(
288
+ image="path/to/room.jpg",
289
+ style="Minimalistic",
290
+ custom_prompt="",
291
+ num_steps=15,
292
+ guidance_scale=5.5,
293
+ seed=-1,
294
+ image_resolution=768,
295
+ api_name="/redesign"
296
  )
297
+ print(result) # Path to output image
298
+ ```
299
+ """)
300
+
301
 
302
+ # Launch
303
+ if __name__ == "__main__":
304
+ demo.queue(max_size=10).launch(
305
+ server_name="0.0.0.0",
306
+ server_port=7860,
307
+ )
requirements.txt CHANGED
@@ -1,13 +1,12 @@
1
- torch
2
- torchvision
3
- diffusers
4
- einops
5
- huggingface-hub
6
- mediapipe
7
- opencv-python-headless
8
- safetensors
9
- transformers
10
- xformers
11
- accelerate
12
- imageio
13
- # controlnet-aux
 
1
+ torch
2
+ torchvision
3
+ diffusers>=0.25.0
4
+ einops
5
+ huggingface-hub
6
+ opencv-python-headless
7
+ safetensors
8
+ transformers>=4.36.0
9
+ accelerate
10
+ timm
11
+ spaces
12
+