|
|
import gradio as gr |
|
|
import torch |
|
|
from diffusers import StableDiffusionPipeline as sdp |
|
|
from huggingface_hub import HfApi |
|
|
import os |
|
|
|
|
|
def cuda_availability():
    """Return the torch device string to run on: 'cuda' when a GPU is
    available, otherwise 'cpu'."""
    return "cuda" if torch.cuda.is_available() else "cpu"
|
|
|
|
|
# Build the Stable Diffusion pipeline (model weights download on first run).
print("Creating a Pipeline")
pipeline = sdp.from_pretrained(
    "stabilityai/stable-diffusion-2-1",
    torch_dtype=torch.float32,
)
print("Pipeline has been created")

# Move the pipeline onto the GPU when one is present.
print("Now checking if we can use CUDA or not")
target_device = cuda_availability()
pipeline.to(target_device)

# Local folder where generated images are written before upload.
os.makedirs("outputs", exist_ok=True)
|
|
|
|
|
def create_image(prompt):
    """Generate an image from *prompt* with the Stable Diffusion pipeline.

    The image is saved to outputs/my_image.png, then a best-effort upload to
    the configured Hugging Face Space is attempted, and the PIL image is
    returned for display in the Gradio UI.

    Args:
        prompt: Text prompt passed to the diffusion pipeline.

    Returns:
        The generated PIL image.
    """
    print("Create an Image")
    # Run the diffusion pipeline; `.images` is a list of PIL images.
    image = pipeline(prompt).images[0]

    # Save locally BEFORE announcing success or uploading, so a failed
    # upload never loses the generated image.
    local_path = os.path.join("outputs", "my_image.png")
    image.save(local_path)
    print("The Image was created successfully")

    # Upload is best-effort: a placeholder/invalid repo id or a missing
    # token must not crash the Gradio callback for the user.
    # Override the target repo with the HF_REPO_ID environment variable.
    repo_id = os.environ.get("HF_REPO_ID", "username/space_name")
    try:
        api = HfApi()
        api.upload_file(
            path_or_fileobj=local_path,
            path_in_repo="outputs/my_image.png",
            repo_id=repo_id,
            repo_type="space",
        )
        print("Image saved successfully")
    except Exception as exc:
        print(f"Warning: upload to {repo_id} failed: {exc}")

    return image
|
|
|
|
|
# Wire the generator into a minimal Gradio UI: one text prompt in, one image out.
demo = gr.Interface(
    fn=create_image,
    inputs="text",
    outputs="image",
)

# share=True exposes a temporary public gradio.live link alongside the local server.
demo.launch(share=True)