Add generation step
app.py CHANGED
@@ -10,11 +10,30 @@ import gradio as gr
 # Set Hugging Face API (needed for gated models)
 hf_api_key = os.environ.get('HF_API_KEY')
 
+# Load the Stable Diffusion pipeline
+model_id = "sd-legacy/stable-diffusion-v1-5"
+
+# Use the Euler scheduler here instead
+scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
+
 # Load the image-to-text pipeline with BLIP model
 get_itt_completion = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
 
 # Text-to-image endpoint
-get_tti_completion = pipeline("text-to-image", model="stabilityai/stable-diffusion-xl-base-1.0")
+#get_tti_completion = pipeline("text-to-image", model="stabilityai/stable-diffusion-xl-base-1.0")
+
+# Load the Stable Diffusion pipeline
+pipe = StableDiffusionPipeline.from_pretrained(
+    model_id,
+    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,  # Use float16 on GPU, float32 on CPU
+    scheduler=scheduler,
+    use_auth_token=hf_api_key  # Required for gated model
+)
+
+# Move pipeline to GPU if available
+device = "cuda" if torch.cuda.is_available() else "cpu"
+pipe = pipe.to(device)
+
 
 # Bringing the functions from lessons 3 and 4!
 def image_to_base64_str(pil_image):
@@ -37,10 +56,16 @@ def captioner(image):
 
     return result[0]['generated_text']
 
-
-
-
-
+# Generate function
+def generate(prompt, steps):
+    # Generate image with Stable Diffusion
+    output = pipe(
+        prompt,
+        negative_prompt=None,  # Handle empty negative prompt
+        num_inference_steps=25,
+    )
+    return output.images[0]  # Return the first generated image (PIL format)
+
 
 # Create Gradio interface
 with gr.Blocks() as demo:
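For context, a minimal sketch of how the new generate function could be wired into the Blocks interface. The body of the interface is not part of this diff, so every component name below (prompt_box, steps_slider, generate_btn, output_image) is a hypothetical illustration rather than the Space's actual layout; note also that the committed generate ignores its steps argument and always runs 25 inference steps.

# Hypothetical wiring; the real demo body lies outside this diff's context lines
with gr.Blocks() as demo:
    gr.Markdown("# Describe and generate images")
    prompt_box = gr.Textbox(label="Prompt")
    steps_slider = gr.Slider(minimum=1, maximum=50, value=25, step=1, label="Inference steps")
    generate_btn = gr.Button("Generate")
    output_image = gr.Image(label="Generated image")
    # Route the button click to the generate function added in the diff above
    generate_btn.click(fn=generate, inputs=[prompt_box, steps_slider], outputs=output_image)

demo.launch()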