"""Image-generation helpers: a Hugging Face Inference API path with a
placeholder fallback, plus local diffusers pipelines (Stable Diffusion v1.5,
an optional FLUX.1-dev variant, and the active sd-turbo backend)."""

import os
from io import BytesIO

import requests
from PIL import Image, ImageDraw, ImageFont, UnidentifiedImageError

# Hugging Face API token, read from the environment (never hard-code it).
HF_API_KEY = os.getenv("HF_API_KEY")

def generate_imagee(prompt):
    """
    Generate an image from the part of the prompt after the dash (-) via the
    Hugging Face Inference API.
    Returns a PIL.Image object. If the API fails, returns a placeholder image.
    """
    # Note: the model *page* URL (https://huggingface.co/stable-diffusion-v1-5/...)
    # serves HTML, not image bytes; the Inference API endpoint below is the form
    # that returns generated images.
    api_url = "https://api-inference.huggingface.co/models/stable-diffusion-v1-5/stable-diffusion-v1-5"
    headers = {"Authorization": f"Bearer {HF_API_KEY}"}

    # Keep only the part of the prompt after the first dash, if any.
    if "-" in prompt:
        prompt_to_send = prompt.split("-", 1)[-1].strip()
    else:
        prompt_to_send = prompt.strip()

    try:
        response = requests.post(api_url, headers=headers, json={"inputs": prompt_to_send})
        if response.status_code == 200:
            try:
                img = Image.open(BytesIO(response.content))
                return img
            except UnidentifiedImageError:
                print("API response is not a valid image.")
        else:
            print(f"API returned status {response.status_code}")
            print("Response content:", response.text[:200])
    except requests.RequestException as e:
        print(f"Request failed: {e}")

    # Fallback placeholder image.
    print("Using placeholder image.")
    placeholder = Image.new("RGB", (512, 512), color=(200, 200, 200))
    draw = ImageDraw.Draw(placeholder)
    text = "Image\nNot\nAvailable"
    try:
        font = ImageFont.load_default()
        # Pillow >= 10 removed multiline_textsize; multiline_textbbox is the
        # supported way to measure multiline text.
        left, top, right, bottom = draw.multiline_textbbox((0, 0), text, font=font)
        w, h = right - left, bottom - top
        draw.multiline_text(
            ((512 - w) / 2, (512 - h) / 2), text, fill=(50, 50, 50), font=font, align="center"
        )
    except Exception:
        draw.text((50, 200), text, fill=(50, 50, 50))
    return placeholder
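
# A minimal usage sketch for the API path (assumes HF_API_KEY is set in the
# environment; the prompt text is hypothetical):
#     img = generate_imagee("meme - a cat wearing sunglasses on a beach")
#     img.save("meme.png")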
# -------------------

from diffusers import StableDiffusionPipeline
import torch

model_id = "sd-legacy/stable-diffusion-v1-5"
# Named sd15_pipe so the sd-turbo pipeline below (also bound to a text-to-image
# pipeline) does not shadow it.
sd15_pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
# sd15_pipe = sd15_pipe.to("cuda")  # float16 weights need a GPU to run well.

def generate_imagee2(prompt):
    """Generate an image locally with the Stable Diffusion v1.5 pipeline."""
    print('PROMPT TO INTRODUCE TO STABLE DIFFUSION:', prompt)
    prompt_to_send = extract_assistant_response(prompt)
    print('INTRODUCED:', prompt_to_send)
    image = sd15_pipe(prompt_to_send).images[0]
    return image
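
# A minimal usage sketch (hypothetical prompt; the ChatML markers are what
# extract_assistant_response below strips off):
#     img = generate_imagee2("<|im_start|>assistant a photo of an astronaut riding a horse on mars<|im_end|>")
#     img.save("astronaut_rides_horse.png")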

def extract_assistant_response(decoded_text):
    """Return only the assistant's turn from a decoded ChatML transcript.

    Model output arrives wrapped in <|im_start|>assistant ... <|im_end|>
    markers; if they are absent, the text is returned stripped but unchanged.
    """
    if "<|im_start|>assistant" in decoded_text:
        part = decoded_text.split("<|im_start|>assistant", 1)[1]
        part = part.split("<|im_end|>", 1)[0]
        return part.strip()
    return decoded_text.strip()
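
# Example:
#     extract_assistant_response("<|im_start|>assistant a dog surfing<|im_end|>")
#     returns "a dog surfing".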
# @InProceedings{Rombach_2022_CVPR,
# author = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn},
# title = {High-Resolution Image Synthesis With Latent Diffusion Models},
# booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
# month = {June},
# year = {2022},
# pages = {10684-10695}
# }

# Alternative backend kept for reference: FLUX.1-dev (much heavier than sd-turbo).
# from diffusers import FluxPipeline
# pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
# pipe.enable_model_cpu_offload()  # Save some VRAM by offloading the model to CPU. Remove this if you have enough GPU power.
# def generate_image(prompt):
#     print('GENERATING IMAGE')
#     prompt = extract_assistant_response(prompt)  # e.g. "A cat holding a sign that says hello world"
#     image = pipe(
#         prompt,
#         height=1024,
#         width=1024,
#         guidance_scale=3.5,
#         num_inference_steps=50,
#         max_sequence_length=512,
#         generator=torch.Generator("cpu").manual_seed(0),
#     ).images[0]
#     # image.save("flux-dev.png")
#     return image

from diffusers import AutoPipelineForText2Image

# Active backend: sd-turbo, a distilled model that generates an image in a
# single inference step with classifier-free guidance disabled.
pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/sd-turbo", torch_dtype=torch.float16, variant="fp16")
# pipe = pipe.to("cuda")  # float16 weights need a GPU to run well.

def generate_image(prompt):
    print('GENERATING IMAGE')
    prompt2 = extract_assistant_response(prompt)  # e.g. "A cat holding a sign that says hello world"
    print(prompt2)
    # sd-turbo is trained for single-step sampling; guidance_scale=0.0 disables CFG.
    image = pipe(prompt=prompt2, num_inference_steps=1, guidance_scale=0.0).images[0]
    return image
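
if __name__ == "__main__":
    # Hypothetical smoke test, not part of the original Space wiring: run the
    # sd-turbo path once and save the result locally. Assumes the pipeline has
    # been moved to a GPU; float16 inference on CPU may fail.
    demo = generate_image("<|im_start|>assistant a cat holding a sign that says hello world<|im_end|>")
    demo.save("demo.png")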