druvx13 committed on
Commit
ceb1278
·
verified ·
1 Parent(s): 4f89b26

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +74 -46
app.py CHANGED
@@ -2,70 +2,98 @@ import gradio as gr
2
  from diffusers import StableDiffusionPipeline
3
  import torch
4
  import os
 
 
5
 
6
- # Set environment variables for CPU optimization
7
- os.environ["TOKENIZERS_PARALLELISM"] = "false"
8
- os.environ["HF_HOME"] = "./cache"
9
 
10
- # Load model with memory-efficient settings
 
 
 
 
 
11
  pipe = StableDiffusionPipeline.from_pretrained(
12
- "Heartsync/NSFW-Uncensored",
13
- torch_dtype=torch.float32, # Required for CPU
 
 
14
  low_cpu_mem_usage=True,
15
- use_safetensors=True,
16
- cache_dir="./model_cache"
17
- ).to("cpu")
18
 
19
- # Enable memory-saving techniques
20
- pipe.enable_attention_slicing()
21
- pipe.enable_vae_slicing()
 
 
22
 
23
- def generate_image(prompt, negative_prompt="", num_inference_steps=30, guidance_scale=7.5, num_images=1):
24
- """Generate images with NSFW model, ensuring safe memory usage"""
 
 
 
25
  try:
26
- images = pipe(
27
- prompt=prompt,
28
- negative_prompt=negative_prompt,
29
- num_inference_steps=num_inference_steps,
30
- guidance_scale=guidance_scale,
31
- num_images_per_prompt=num_images,
32
- generator=torch.Generator(device="cpu").manual_seed(42)
33
- ).images
34
-
35
- return images
36
  except Exception as e:
37
- return f"Error: {str(e)}. This model may require shorter prompts or lower resolution."
38
 
39
- # Define UI
40
- with gr.Blocks(theme="soft", css=".gr-box {border-color: #ff4b5c}") as demo:
41
- gr.Markdown("""
42
- # 🔞 NSFW-Uncensored Image Generator
43
- ⚠️ This model generates explicit content. Use responsibly and in compliance with local laws.
44
-
45
- Model: [Heartsync/NSFW-Uncensored](https://huggingface.co/Heartsync/NSFW-Uncensored )
46
- """)
 
 
 
47
 
48
  with gr.Row():
49
  with gr.Column():
50
- prompt = gr.Textbox(label="Prompt", placeholder="Describe your image...", lines=4)
51
- negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What to avoid...", lines=3)
52
-
 
 
53
  with gr.Row():
54
- num_images = gr.Slider(1, 4, value=1, step=1, label="Images to Generate")
55
- num_inference_steps = gr.Slider(20, 50, value=30, step=1, label="Generation Steps")
56
-
57
- guidance_scale = gr.Slider(1, 20, value=7.5, step=0.5, label="Guidance Scale")
58
-
 
 
 
 
59
  with gr.Column():
60
- output = gr.Gallery(label="Generated Images", columns=2, height=512)
 
 
 
 
 
 
 
 
61
 
62
- generate_btn = gr.Button("🎨 Generate NSFW Image", variant="primary")
63
 
64
  generate_btn.click(
65
  fn=generate_image,
66
- inputs=[prompt, negative_prompt, num_inference_steps, guidance_scale, num_images],
67
- outputs=output
68
  )
69
 
70
  if __name__ == "__main__":
71
- demo.queue(max_size=20).launch()
 
2
  from diffusers import StableDiffusionPipeline
3
  import torch
4
  import os
5
+ from PIL import Image
6
+ import warnings
7
 
8
+ # Suppress warnings for cleaner output
9
+ warnings.filterwarnings("ignore")
 
10
 
11
+ # Model loading configuration
12
+ MODEL_NAME = "Heartsync/NSFW-Uncensored"
13
+ CACHE_DIR = "./model_cache"
14
+ os.makedirs(CACHE_DIR, exist_ok=True)
15
+
16
+ # Load model with memory optimization
17
  pipe = StableDiffusionPipeline.from_pretrained(
18
+ MODEL_NAME,
19
+ torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
20
+ cache_dir=CACHE_DIR,
21
+ revision="fp16" if torch.cuda.is_available() else None,
22
  low_cpu_mem_usage=True,
23
+ device_map="cpu"
24
+ )
 
25
 
26
+ # Safety disclaimer
27
+ DISCLAIMER = """
28
+ ⚠️ **Content Warning**
29
+ This model may generate explicit/sensitive content. By using this application, you agree to comply with [HuggingFace's Acceptable Use Policy](https://huggingface.co/policies/acceptable-use ).
30
+ """
31
 
32
+ def generate_image(prompt, width, height, num_inference_steps=30, guidance_scale=7.5):
33
+ """Generate image with safety checks and memory management"""
34
+ if not prompt.strip():
35
+ return None, "Please enter a prompt"
36
+
37
  try:
38
+ with torch.inference_mode():
39
+ result = pipe(
40
+ prompt=prompt,
41
+ width=width,
42
+ height=height,
43
+ num_inference_steps=num_inference_steps,
44
+ guidance_scale=guidance_scale
45
+ ).images[0]
46
+ return result.convert("RGB"), None
 
47
  except Exception as e:
48
+ return None, f"Error: {str(e)}"
# UI configuration: preset (width, height) pairs; None marks free-form entry
ASPECT_RATIOS = {
    "Square (512x512)": (512, 512),
    "Portrait (512x768)": (512, 768),
    "Landscape (768x512)": (768, 512),
    "Phone (384x832)": (384, 832),
    "Custom": None,
}

with gr.Blocks(theme="soft", css=".disclaimer {color: #FF4B2B; font-size: 0.9em;}") as demo:
    gr.Markdown("# 🔞 NSFW-Uncensored Text-to-Image Generator\n" + DISCLAIMER)

    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="Describe your image...",
                lines=3
            )
            with gr.Row():
                # precision=0 makes the components deliver ints — the
                # diffusion pipeline rejects float width/height values.
                width = gr.Number(value=512, label="Width", precision=0, interactive=True)
                height = gr.Number(value=512, label="Height", precision=0, interactive=True)
            aspect_ratio = gr.Dropdown(
                choices=list(ASPECT_RATIOS.keys()),
                value="Square (512x512)",
                label="Aspect Ratio"
            )
            generate_btn = gr.Button("Generate Image", variant="primary")

        with gr.Column():
            output_image = gr.Image(label="Result", interactive=False)
            error_msg = gr.Textbox(label="Status", visible=True)

    def update_dimensions(choice):
        """Sync the number boxes with the chosen preset.

        "Custom" (or an unknown choice) leaves the current values editable
        as-is; presets overwrite both boxes.
        """
        dims = ASPECT_RATIOS.get(choice)
        if dims is None:
            return [gr.Number(interactive=True), gr.Number(interactive=True)]
        w, h = dims
        return [gr.Number(value=w, interactive=True), gr.Number(value=h, interactive=True)]

    aspect_ratio.change(fn=update_dimensions, inputs=[aspect_ratio], outputs=[width, height])

    # Steps/guidance are left at generate_image's defaults (30 / 7.5).
    generate_btn.click(
        fn=generate_image,
        inputs=[prompt, width, height],
        outputs=[output_image, error_msg]
    )

if __name__ == "__main__":
    demo.launch()