feat: add test

- app.py +3 -0
- stable_diffusion_inference.py +5 -8
- test/test_api.py +23 -0
- test.py → test/test_inference.py +1 -1
app.py
CHANGED
@@ -22,6 +22,9 @@ examples = [
 
 css = """
 body {
+    position: fixed;
+    top: 0;
+    left: 0;
     background-image: url('https://lh3.googleusercontent.com/d/1y7H4WIjnBAcNvvi-3qOV_ORE-jMXP4fr');
     background-repeat: no-repeat;
     background-attachment: fixed;
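The three added CSS properties pin the body element in place so the background image stays put while the page scrolls. For context only: Gradio applies a custom stylesheet when the string is passed to the Blocks constructor. The sketch below is a minimal, assumed wiring; the actual components and layout of app.py are not part of this diff and the Textbox/Image names are illustrative.

# Minimal sketch (assumption: the demo is built with gr.Blocks and receives css= directly).
import gradio as gr

css = """
body {
    position: fixed;
    top: 0;
    left: 0;
    background-image: url('https://lh3.googleusercontent.com/d/1y7H4WIjnBAcNvvi-3qOV_ORE-jMXP4fr');
    background-repeat: no-repeat;
    background-attachment: fixed;
}
"""

with gr.Blocks(css=css) as demo:
    prompt = gr.Textbox(label="Prompt")        # illustrative component
    result = gr.Image(label="Generated face")  # illustrative component

demo.launch()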
stable_diffusion_inference.py
CHANGED
@@ -18,15 +18,17 @@ from utils import timer
 
 model_path = 'Ngene787/Faice_text2face'
 
-
+if torch.backends.mps.is_available():
+    accelerator = Accelerator(gradient_accumulation_steps=1)
+else:
+    accelerator = Accelerator(mixed_precision="fp16", gradient_accumulation_steps=1)
+
 logger.info("Loading model ...")
 device = "cuda" if torch.cuda.is_available() else "cpu"
 if torch.cuda.is_available():
     torch_dtype = torch.float16
 else:
     torch_dtype = torch.float32
-# device = "cpu"
-# torch_dtype = torch.float32
 pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch_dtype,
                                                 low_cpu_mem_usage=True,
                                                 # requires_safety_checker=False
@@ -37,11 +39,6 @@ pipe = accelerator.prepare(pipe)
 # Enable memory-efficient attention
 # pipe.enable_xformers_memory_efficient_attention()
 
-# Enable attention slicing
-# pipe.enable_attention_slicing()
-
-# Enable VAE slicing
-# pipe.enable_vae_slicing()
 
 MAX_SEED = np.iinfo(np.int32).max
 
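The new block chooses the Accelerator configuration up front: on Apple-silicon (MPS) machines it skips mixed precision, presumably because fp16 mixed precision is not supported on that backend in accelerate, while CUDA/CPU hosts keep the fp16 setting. A self-contained sketch of the resulting load path is below; it only mirrors the lines visible in this diff plus the pipe = accelerator.prepare(pipe) call shown in the second hunk header, so everything else in the module is assumed unchanged.

# Sketch of the device/precision selection after this change (model id taken from the diff).
import torch
from accelerate import Accelerator
from diffusers import StableDiffusionPipeline

model_path = 'Ngene787/Faice_text2face'

# fp16 mixed precision is only requested when not running on Apple MPS.
if torch.backends.mps.is_available():
    accelerator = Accelerator(gradient_accumulation_steps=1)
else:
    accelerator = Accelerator(mixed_precision="fp16", gradient_accumulation_steps=1)

device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

pipe = StableDiffusionPipeline.from_pretrained(
    model_path,
    torch_dtype=torch_dtype,
    low_cpu_mem_usage=True,
)
pipe = accelerator.prepare(pipe)  # as in the unchanged hunk header at old line 37 / new line 39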
test/test_api.py
ADDED
@@ -0,0 +1,23 @@
+# -*- coding: UTF-8 -*-
+"""
+@Time : 30/05/2025 10:18
+@Author : xiaoguangliang
+@File : test_api.py
+@Project : Faice_text2face
+"""
+from gradio_client import Client
+
+client = Client("https://e517e907a9e4213655.gradio.live/")
+
+prompt = "Portrait of a young woman with long wavy hair, soft studio lighting, high contrast, 4k resolution, professional headshot"
+
+result = client.predict(
+    prompt=prompt,
+    negative_prompt="",
+    seed=0,
+    randomize_seed=False,
+    guidance_scale=7.5,
+    num_inference_steps=100,
+    api_name="/inference"
+)
+print(result)
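test_api.py exercises the deployed Space through gradio_client, so it depends on the temporary *.gradio.live share URL baked into the file, which will expire. One hedged variant the same call could take as a pytest test, with the endpoint supplied at run time; the GRADIO_URL variable name and the reduced step count are assumptions, not part of this commit.

# Hypothetical pytest wrapper around the same /inference call; GRADIO_URL is an
# assumed convention, not something defined in this repo.
import os

import pytest
from gradio_client import Client


@pytest.mark.skipif("GRADIO_URL" not in os.environ, reason="no live endpoint configured")
def test_inference_endpoint():
    client = Client(os.environ["GRADIO_URL"])
    result = client.predict(
        prompt="Portrait of a young woman with long wavy hair",
        negative_prompt="",
        seed=0,
        randomize_seed=False,
        guidance_scale=7.5,
        num_inference_steps=20,  # fewer steps than the script, just to keep the test fast
        api_name="/inference",
    )
    assert result is not None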
test.py → test/test_inference.py
RENAMED
@@ -2,7 +2,7 @@
 """
 @Time : 28/05/2025 15:22
 @Author : xiaoguangliang
-@File :
+@File : test_inference.py
 @Project : Faice_text2face
 """
 from stable_diffusion_inference import inference
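The rename only fills in the @File docstring field; the body of the test, apart from the inference import, is not visible in this diff. If a local smoke test were wanted alongside the API test above, it might look roughly like the sketch below. Note that the real signature of inference() is not shown anywhere in this commit, so the keyword arguments are assumptions copied from the /inference API parameters in test_api.py.

# Hypothetical local smoke test; inference()'s actual signature is not part of this diff.
from stable_diffusion_inference import inference

image = inference(
    prompt="Portrait of a young woman with long wavy hair",
    negative_prompt="",
    seed=0,
    randomize_seed=False,
    guidance_scale=7.5,
    num_inference_steps=20,
)
print(image)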