Update App: Add Terrain Builder Tab
app.py
CHANGED
Old version (left pane; removed lines are prefixed with "-"):

@@ -11,43 +11,37 @@ import json
 import base64
 from pathlib import Path
 from io import BytesIO

 import gradio as gr
 from PIL import Image
 import spaces  # HuggingFace Zero GPU

-# For Zero GPU spaces, we use a simpler approach:
-# Direct diffusers pipeline instead of ComfyUI backend
-# This avoids ComfyUI's CUDA initialization issues
-
 try:
     import torch
-    from diffusers import StableDiffusionImg2ImgPipeline
     DIFFUSERS_AVAILABLE = True
 except ImportError:
     DIFFUSERS_AVAILABLE = False
     print("Diffusers not available, will use fallback")
-
 # Global pipeline (loaded on first use)
 _pipeline = None
 _pipeline_lock = threading.Lock()
-
 def get_pipeline():
     """Get or create the Stable Diffusion pipeline."""
     global _pipeline
-
     if _pipeline is not None:
         return _pipeline
-
     with _pipeline_lock:
         if _pipeline is not None:
             return _pipeline

-        # Use a lightweight model for Zero GPU
         model_id = "runwayml/stable-diffusion-v1-5"

         _pipeline = StableDiffusionImg2ImgPipeline.from_pretrained(

@@ -56,17 +50,11 @@ def get_pipeline():
             safety_checker=None,
             requires_safety_checker=False
         )
-
         if torch.cuda.is_available():
            _pipeline = _pipeline.to("cuda")
-        else:
-            print("Pipeline loaded on CPU")
-
-        return _pipeline
-
-@spaces.GPU(duration=60)
 def enhance_image_gpu(
     image: Image.Image,
     prompt: str,

@@ -76,43 +64,27 @@ def enhance_image_gpu(
     num_inference_steps: int = 25,
     seed: int = -1
 ) -> Image.Image:
-    """
-    Enhance an image using Stable Diffusion img2img.
-    This function runs on GPU via Zero GPU.
-    """
     if not DIFFUSERS_AVAILABLE:
-        return image

     pipe = get_pipeline()
-
-    # Move to GPU if available
     if torch.cuda.is_available():
         pipe = pipe.to("cuda")
-
-    # Prepare image
     if image.mode != "RGB":
         image = image.convert("RGB")
-
-    # Resize
-    max_size = 512
-    ratio = min(max_size / image.width, max_size / image.height)
-    if ratio < 1:
-        new_size = (int(image.width * ratio), int(image.height * ratio))
-        image = image.resize(new_size, Image.LANCZOS)
-
-    # Ensure dimensions are multiples of 8
     w, h = image.size
     w = (w // 8) * 8
     h = (h // 8) * 8
-    image = image.resize((w, h), Image.LANCZOS)

-    # Set seed
     generator = None
     if seed >= 0:
         generator = torch.Generator(device="cuda" if torch.cuda.is_available() else "cpu")
         generator.manual_seed(seed)
-
-    # Run inference
     result = pipe(
         prompt=prompt,
         image=image,

@@ -125,120 +97,103 @@ def enhance_image_gpu(

     return result

-def enhance_nwn_character(
-    input_image: Image.Image,
-    character_type: str,
-    denoise: float,
-    steps: int,
-    seed: int
-) -> Image.Image:
-    """
-    Enhance an NWN character screenshot to photorealistic quality.
-    """
-    if input_image is None:
-        return None
-
-    prompt = f"photorealistic {character_type}, highly detailed facial features, 8k, cinematic lighting, professional character art, sharp focus, intricate armor details, fantasy portrait"
-
-    negative_prompt = "blurry, low quality, low poly, pixelated, cartoonish, anime style, deformed, bad anatomy, extra limbs, watermark, signature, text, simple, flat lighting"
-
-    result = enhance_image_gpu(
-        image=input_image,
-        prompt=prompt,
-        negative_prompt=negative_prompt,
-        strength=denoise,
-        guidance_scale=7.5,
-        num_inference_steps=steps,
-        seed=seed
-    )
-
-    return result
-
-# Character presets for NWN
 CHARACTER_PRESETS = [
     "female elf paladin in ornate silver armor",
     "male human warrior in plate armor",
-    "female human mage in flowing robes"
-    "male dwarf fighter in heavy armor",
-    "female half-elf ranger in leather armor",
-    "male elf wizard in arcane robes",
-    "female human cleric in white robes",
-    "male half-orc barbarian in fur armor",
 ]

-#

-            input_image = gr.Image(type="pil", label="NWN Screenshot", height=400)
-
-            character_type = gr.Dropdown(
-                choices=CHARACTER_PRESETS,
-                value=CHARACTER_PRESETS[0],
-                label="Character Type",
-                allow_custom_value=True
-            )
-
-            with gr.Row():
-                denoise = gr.Slider(
-                    minimum=0.3, maximum=0.9, value=0.65, step=0.05,
-                    label="Enhancement Strength",
-                    info="Lower = more faithful to original"
-                )
-                steps = gr.Slider(
-                    minimum=15, maximum=40, value=25, step=5,
-                    label="Quality Steps"
-                )
-
-            seed = gr.Number(value=-1, label="Seed (-1 for random)", precision=0)
-
-            enhance_btn = gr.Button("✨ Enhance Character", variant="primary", size="lg")
-
-        with gr.Column(scale=1):
-            output_image = gr.Image(type="pil", label="Enhanced Result", height=400)

-        inputs=[input_image, character_type, denoise, steps, seed],
-        outputs=output_image,
-        api_name="enhance"  # This enables API access at /enhance
-    )
-
-    gr.Markdown("""
-    ---
-    ## API Usage
-    ```python
-    from gradio_client import Client, handle_file
-
-
-
-        character_type="female elf paladin in ornate silver armor",
-        denoise=0.65,
-        steps=25,
-        seed=-1,
-        api_name="/enhance"
-    )
-    ```
-
-    ## Workflow
-    1. Take screenshot of NWN character
-    2. Upload and enhance here
-    3. Use enhanced image with Meshy.ai for 3D generation
-    4. Import 3D mesh to Unreal → Mesh to MetaHuman
-    """)

 if __name__ == "__main__":
     demo.launch(server_name="0.0.0.0", server_port=7860)
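Reviewer note on the removed resize block: the old enhance_image_gpu first capped the longest side at 512 px (preserving aspect ratio) and then floored both dimensions to multiples of 8, while the new version below keeps only the flooring. If the cap is still wanted for very large screenshots, a minimal sketch of the combined prep step (hypothetical helper, not part of this commit):

```python
from PIL import Image


def prep_for_img2img(image: Image.Image, max_size: int = 512) -> Image.Image:
    # Downscale so the longest side is at most max_size, keeping aspect ratio.
    ratio = min(max_size / image.width, max_size / image.height)
    if ratio < 1:
        image = image.resize(
            (int(image.width * ratio), int(image.height * ratio)),
            Image.Resampling.LANCZOS,
        )
    # Floor both sides to multiples of 8, as SD's latent downsampling requires.
    w, h = (image.width // 8) * 8, (image.height // 8) * 8
    return image.resize((w, h), Image.Resampling.LANCZOS)
```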
New version (right pane; added lines are prefixed with "+"):

 import base64
 from pathlib import Path
 from io import BytesIO
+import random

 import gradio as gr
 from PIL import Image
+import numpy as np
+import cv2
 import spaces  # HuggingFace Zero GPU

 try:
     import torch
+    from diffusers import StableDiffusionImg2ImgPipeline
     DIFFUSERS_AVAILABLE = True
 except ImportError:
     DIFFUSERS_AVAILABLE = False
     print("Diffusers not available, will use fallback")

 # Global pipeline (loaded on first use)
 _pipeline = None
 _pipeline_lock = threading.Lock()

 def get_pipeline():
     """Get or create the Stable Diffusion pipeline."""
     global _pipeline
     if _pipeline is not None:
         return _pipeline
+
     with _pipeline_lock:
         if _pipeline is not None:
             return _pipeline

+        # Use v1-5 for general purpose (Terrain + Char)
         model_id = "runwayml/stable-diffusion-v1-5"

         _pipeline = StableDiffusionImg2ImgPipeline.from_pretrained(

             safety_checker=None,
             requires_safety_checker=False
         )
         if torch.cuda.is_available():
             _pipeline = _pipeline.to("cuda")
+        return _pipeline

+@spaces.GPU(duration=60)
 def enhance_image_gpu(
     image: Image.Image,
     prompt: str,

     num_inference_steps: int = 25,
     seed: int = -1
 ) -> Image.Image:
     if not DIFFUSERS_AVAILABLE:
+        return image

     pipe = get_pipeline()
     if torch.cuda.is_available():
         pipe = pipe.to("cuda")
+
     if image.mode != "RGB":
         image = image.convert("RGB")
+
+    # Resize logic (maintain aspect ratio, dimensions floored to multiples of 8)
     w, h = image.size
     w = (w // 8) * 8
     h = (h // 8) * 8
+    image = image.resize((w, h), Image.Resampling.LANCZOS)

     generator = None
     if seed >= 0:
         generator = torch.Generator(device="cuda" if torch.cuda.is_available() else "cpu")
         generator.manual_seed(seed)
+
     result = pipe(
         prompt=prompt,
         image=image,

     return result

+# --- CHARACTER LOGIC ---
+def enhance_nwn_character(input_image, character_type, denoise, steps, seed):
+    if input_image is None: return None
+    prompt = f"photorealistic {character_type}, highly detailed, 8k, cinematic lighting"
+    neg = "blurry, low quality, low poly, bad anatomy, watermark, text"
+    return enhance_image_gpu(input_image, prompt, neg, denoise, 7.5, steps, seed)

 CHARACTER_PRESETS = [
     "female elf paladin in ornate silver armor",
     "male human warrior in plate armor",
+    "female human mage in flowing robes"
 ]

+# --- TERRAIN LOGIC ---
+def generate_noise_map(resolution=512, seed=-1):
+    if seed >= 0:
+        np.random.seed(seed)
+    # Simple fractal-noise approximation
+    noise = np.random.rand(resolution, resolution).astype(np.float32)
+    # Blur to create "hills"
+    noise = cv2.GaussianBlur(noise, (101, 101), 0)
+    noise = (noise - noise.min()) / (noise.max() - noise.min())
+    return noise
+
+def erosion_sim(heightmap, iterations=10):
+    # Fast blur-based erosion: relax each point toward its local average,
+    # i.e. H_new = H - (H - blur(H)) * strength
+    for _ in range(iterations):
+        blurred = cv2.GaussianBlur(heightmap, (3, 3), 0)
+        heightmap = heightmap - (heightmap - blurred) * 0.1
+    return heightmap
+
+def generate_terrain(seed, erosion_steps, ai_strength):
+    # 1. Base noise
+    res = 512
+    h_map = generate_noise_map(res, seed)
+
+    # 2. Convert to an image for the AI pass
+    img_pil = Image.fromarray((h_map * 255).astype(np.uint8)).convert("RGB")
+
+    # 3. AI enhancement (hallucinate geological detail)
+    prompt = "high altitude aerial view of realistic mountain terrain heightmap, grayscale, erosion, geological details, 8k"
+    neg = "color, trees, water, buildings, roads, text, map overlay"
+
+    enhanced = enhance_image_gpu(
+        img_pil, prompt, neg, strength=ai_strength, seed=seed
+    )

+    # 4. Post-process back to a float heightmap in [0, 1]
+    enhanced_np = np.array(enhanced.convert("L")).astype(np.float32) / 255.0

+    # 5. Erosion on the AI result
+    eroded = erosion_sim(enhanced_np, erosion_steps)

+    # 6. Convert to 16-bit
+    h_16 = (eroded * 65535).clip(0, 65535).astype(np.uint16)

+    out_path = "output_terrain.png"
+    cv2.imwrite(out_path, h_16)

+    # Return 8-bit preview and file path
+    preview = (eroded * 255).astype(np.uint8)
+    return Image.fromarray(preview), out_path

+# --- APP UI ---
+with gr.Blocks(title="DGG Suite (Zero GPU)", theme=gr.themes.Soft()) as demo:
+    gr.Markdown("# 🛠️ DGG Content Suite")
+
+    with gr.Tabs():
+        # TAB 1: CHARACTERS
+        with gr.Tab("Character Enhancer"):
+            with gr.Row():
+                with gr.Column():
+                    c_in = gr.Image(type="pil", label="Input")
+                    c_type = gr.Dropdown(CHARACTER_PRESETS, label="Type", value=CHARACTER_PRESETS[0], allow_custom_value=True)
+                    c_str = gr.Slider(0.3, 1.0, 0.65, label="Strength")
+                    c_seed = gr.Number(-1, label="Seed")
+                    c_btn = gr.Button("Enhance", variant="primary")
+                with gr.Column():
+                    c_out = gr.Image(label="Result")
+            c_btn.click(enhance_nwn_character, [c_in, c_type, c_str, gr.Number(25, visible=False), c_seed], c_out)
+
+        # TAB 2: TERRAIN
+        with gr.Tab("Terrain Builder"):
+            gr.Markdown("Generate 16-bit Heightmaps for UE5")
+            with gr.Row():
+                with gr.Column():
+                    t_seed = gr.Number(-1, label="Seed")
+                    t_iter = gr.Slider(0, 50, 10, label="Erosion Steps")
+                    t_ai = gr.Slider(0.0, 1.0, 0.5, label="AI Upscale Strength")
+                    t_btn = gr.Button("Generate Heightmap", variant="primary")
+                with gr.Column():
+                    t_prev = gr.Image(label="Preview (8-bit)")
+                    t_file = gr.File(label="Download 16-bit PNG")
+
+            t_btn.click(generate_terrain, [t_seed, t_iter, t_ai], [t_prev, t_file])

 if __name__ == "__main__":
     demo.launch(server_name="0.0.0.0", server_port=7860)
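For reference, the terrain path minus the GPU img2img pass can be exercised locally. This is a minimal sketch that mirrors generate_noise_map, erosion_sim, and the 16-bit export above; the output file name is illustrative:

```python
import numpy as np
import cv2

# Base noise, blurred into broad hills and normalised to [0, 1] (mirrors generate_noise_map).
np.random.seed(42)
noise = np.random.rand(512, 512).astype(np.float32)
noise = cv2.GaussianBlur(noise, (101, 101), 0)
h = (noise - noise.min()) / (noise.max() - noise.min())

# Blur-based erosion: pull each point toward its local average (mirrors erosion_sim).
for _ in range(10):
    h = h - (h - cv2.GaussianBlur(h, (3, 3), 0)) * 0.1

# 16-bit export, as generate_terrain does before offering the file for UE5 import.
h16 = (h * 65535).clip(0, 65535).astype(np.uint16)
cv2.imwrite("terrain_local.png", h16)

# The written PNG should round-trip as uint16 for the UE5 landscape importer.
print(cv2.imread("terrain_local.png", cv2.IMREAD_UNCHANGED).dtype)  # uint16
```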
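Since the new click handlers no longer pass api_name, the /enhance endpoint documented in the removed Markdown goes away. A sketch of driving both tabs with gradio_client, assuming Gradio's default endpoint names derived from the handler function names and a placeholder Space id (confirm the real names with client.view_api()):

```python
from gradio_client import Client, handle_file

client = Client("your-username/your-space")  # placeholder Space id

# Character Enhancer tab: image, preset text, strength, hidden steps, seed.
enhanced_path = client.predict(
    handle_file("nwn_screenshot.png"),
    "female elf paladin in ornate silver armor",
    0.65,
    25,
    -1,
    api_name="/enhance_nwn_character",  # assumed default name; verify with view_api()
)

# Terrain Builder tab: seed, erosion steps, AI strength -> (preview, 16-bit PNG path).
preview_path, heightmap_path = client.predict(
    -1,
    10,
    0.5,
    api_name="/generate_terrain",  # assumed default name; verify with view_api()
)
```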