Spaces:
Running
feat: run locally and change parameters
Browse files
- app.py +1 -2
- stable_diffusion_inference.py +3 -2
app.py
CHANGED
|
@@ -26,7 +26,6 @@ body {
|
|
| 26 |
background-repeat: no-repeat;
|
| 27 |
background-attachment: fixed;
|
| 28 |
background-position: center;
|
| 29 |
-
backdrop-filter: blur(3px);
|
| 30 |
min-height: 100vh;
|
| 31 |
}
|
| 32 |
#col-container {
|
|
@@ -74,7 +73,7 @@ with gr.Blocks(theme=theme, css=css) as demo:
|
|
| 74 |
with gr.Column(elem_id="col-container"):
|
| 75 |
gr.HTML("""
|
| 76 |
<div align="center" style="margin-bottom: 20px;">
|
| 77 |
-
<img src='Faice.png' width="160">
|
| 78 |
<p style="font-size: 16px; max-width: 960px; margin: 5px auto;">
|
| 79 |
Human Faces Generation with Diffusion Models.
|
| 80 |
</p>
|
|
|
|
| 26 |
background-repeat: no-repeat;
|
| 27 |
background-attachment: fixed;
|
| 28 |
background-position: center;
|
|
|
|
| 29 |
min-height: 100vh;
|
| 30 |
}
|
| 31 |
#col-container {
|
|
|
|
| 73 |
with gr.Column(elem_id="col-container"):
|
| 74 |
gr.HTML("""
|
| 75 |
<div align="center" style="margin-bottom: 20px;">
|
| 76 |
+
<img src='/gradio_api/file=Faice.png' width="160">
|
| 77 |
<p style="font-size: 16px; max-width: 960px; margin: 5px auto;">
|
| 78 |
Human Faces Generation with Diffusion Models.
|
| 79 |
</p>
|
stable_diffusion_inference.py
CHANGED
|
@@ -18,7 +18,8 @@ from utils import timer
|
|
| 18 |
|
| 19 |
model_path = 'Ngene787/Faice_text2face'
|
| 20 |
|
| 21 |
-
accelerator = Accelerator(mixed_precision="fp16", gradient_accumulation_steps=1)
|
|
|
|
| 22 |
logger.info("Loading model ...")
|
| 23 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 24 |
if torch.cuda.is_available():
|
|
@@ -35,7 +36,7 @@ pipe = pipe.to(device)
|
|
| 35 |
|
| 36 |
pipe = accelerator.prepare(pipe)
|
| 37 |
# Enable memory-efficient attention
|
| 38 |
-
pipe.enable_xformers_memory_efficient_attention()
|
| 39 |
|
| 40 |
# Enable attention slicing
|
| 41 |
pipe.enable_attention_slicing()
|
|
|
|
| 18 |
|
| 19 |
model_path = 'Ngene787/Faice_text2face'
|
| 20 |
|
| 21 |
+
# accelerator = Accelerator(mixed_precision="fp16", gradient_accumulation_steps=1)
|
| 22 |
+
accelerator = Accelerator(gradient_accumulation_steps=1)
|
| 23 |
logger.info("Loading model ...")
|
| 24 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 25 |
if torch.cuda.is_available():
|
|
|
|
| 36 |
|
| 37 |
pipe = accelerator.prepare(pipe)
|
| 38 |
# Enable memory-efficient attention
|
| 39 |
+
# pipe.enable_xformers_memory_efficient_attention()
|
| 40 |
|
| 41 |
# Enable attention slicing
|
| 42 |
pipe.enable_attention_slicing()
|