Update app.py
app.py CHANGED
@@ -1,31 +1,67 @@
+import torch
+from diffusers import StableDiffusionPipeline
 import gradio as gr
-from llama_cpp import Llama
-import os
 
-#
-
-
-#
-
+# ---------------------------------------------------------------------------
+# 1. Load the Stable Diffusion model from Hugging Face
+#    - We load "eric707/jibjab" first; "runwayml/stable-diffusion-v1-5" is the fallback.
+#    - "revision='fp16'" and "torch_dtype=torch.float16" select the half-precision weights.
+#    - .to('cuda') if a GPU is available, else .to('cpu').
+# ---------------------------------------------------------------------------
+try:
+    pipe = StableDiffusionPipeline.from_pretrained(
+        "eric707/jibjab",
+        revision="fp16",
+        torch_dtype=torch.float16
+    ).to("cuda")
+    device = "cuda"
+except Exception:
+    # If CUDA is not available, fall back to CPU (VERY slow for SD, but works in a pinch).
+    pipe = StableDiffusionPipeline.from_pretrained(
+        "runwayml/stable-diffusion-v1-5",
+        revision="fp16"
+        # If you're on CPU, you might remove torch_dtype for better compatibility:
+        # torch_dtype=torch.float16 is not recommended on CPU.
+    ).to("cpu")
+    device = "cpu"
 
+# ---------------------------------------------------------------------------
+# 2. Define a function to generate images given a prompt.
+#    - We keep things simple and only accept a single prompt string.
+#    - Feel free to modify the inference steps, guidance scale, image size, etc.
+# ---------------------------------------------------------------------------
 def generate_image(prompt):
-
-
-
-
-
+    # Lower num_inference_steps for faster generation (or reduce the output size if memory runs out).
+    image = pipe(
+        prompt,
+        num_inference_steps=30,
+        guidance_scale=7.5
+    ).images[0]
+    return image
 
-#
+# ---------------------------------------------------------------------------
+# 3. Build the Gradio UI
+#    - We use a Textbox for user input,
+#      and an Image component for displaying the generated image.
+# ---------------------------------------------------------------------------
 with gr.Blocks() as demo:
-    gr.Markdown("
+    gr.Markdown("## Stable Diffusion Image Generation")
+
     with gr.Row():
-
-
-
-
-
-
+        with gr.Column():
+            prompt_input = gr.Textbox(
+                label="Enter a prompt to generate an image",
+                placeholder="A photo of an astronaut riding a horse on Mars"
+            )
+            generate_button = gr.Button("Generate Image")
+        with gr.Column():
+            output_image = gr.Image(label="Generated Image")
+
+    generate_button.click(fn=generate_image, inputs=prompt_input, outputs=output_image)
 
-#
+# ---------------------------------------------------------------------------
+# 4. Launch the Gradio app
+# ---------------------------------------------------------------------------
 if __name__ == "__main__":
+    # On HF Spaces, .launch() picks up GRADIO_SERVER_NAME/GRADIO_SERVER_PORT from the environment.
     demo.launch()
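
A note on the try/except fallback in the new file: a bare exception handler also hides unrelated load failures (a mistyped model id, missing weight files), not just a missing GPU. Below is a minimal sketch of an alternative that selects the device explicitly with torch.cuda.is_available(); it uses the fallback model id from the diff, and everything else is an assumption, not part of the commit.

import torch
from diffusers import StableDiffusionPipeline

# Decide the device up front so genuine loading errors surface instead of
# being swallowed by an exception handler.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32  # fp16 is not recommended on CPU

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # fallback model from the diff; swap in your own
    torch_dtype=dtype
).to(device)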
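The section-2 comment invites modifying the inference steps and guidance scale; one way to do that without editing code is to expose them as Gradio sliders. A sketch under the assumption that pipe is already loaded as above (the slider names and ranges are illustrative, not from the commit):

import gradio as gr

def generate_image(prompt, steps, guidance):
    # Same pipeline call as in the diff, with the two knobs passed through from the UI.
    return pipe(prompt, num_inference_steps=int(steps), guidance_scale=guidance).images[0]

with gr.Blocks() as demo:
    prompt_input = gr.Textbox(label="Enter a prompt to generate an image")
    steps_slider = gr.Slider(minimum=1, maximum=50, value=30, step=1, label="Inference steps")
    guidance_slider = gr.Slider(minimum=1.0, maximum=15.0, value=7.5, label="Guidance scale")
    generate_button = gr.Button("Generate Image")
    output_image = gr.Image(label="Generated Image")
    generate_button.click(
        fn=generate_image,
        inputs=[prompt_input, steps_slider, guidance_slider],
        outputs=output_image
    )

if __name__ == "__main__":
    demo.launch()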