Spaces:
Runtime error
Runtime error
| import gradio as gr | |
| import openai | |
| from dotenv import load_dotenv | |
| import os | |
| import requests | |
| from transformers import BlipProcessor, BlipForConditionalGeneration | |
| from PIL import Image | |
# Load environment variables from a local .env file (expects OPENAI_API_KEY).
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# NOTE(review): model_id is assigned but never referenced in this file —
# presumably intended for a chat-completion call; confirm before removing.
model_id = "gpt-3.5-turbo"
def gpt_image(prompt: str, n: int) -> list:
    """Generate *n* sticker-style images from *prompt* via the OpenAI Image API.

    Parameters
    ----------
    prompt : str
        Base caption describing the desired image content; a fixed
        "sticker style" suffix is appended before submission.
    n : int
        Number of image variations to request (256x256 each).

    Returns
    -------
    list[PIL.Image.Image]
        The generated images, downloaded from their temporary URLs and
        converted to RGB.
    """
    response = openai.Image.create(
        prompt=prompt
        + " sticker style, with templated shapes like circle, background color should be transparent",
        n=n,
        size="256x256",
    )
    images = []
    for item in response["data"]:
        # Stream each generated image straight from its (short-lived) URL.
        raw = requests.get(item["url"], stream=True).raw
        images.append(Image.open(raw).convert("RGB"))
    # BUG FIX: the original signature was annotated `-> str` although the
    # function has always returned a list of PIL images.
    return images
# test embed url = https://webplayer.momenti.tv/?project_id=1380564943&moment_info_id=1844984252

# Lazy singleton cache for the BLIP captioning pipeline: reloading the large
# checkpoint on every request was the dominant per-call cost.
_BLIP_CHECKPOINT = "Salesforce/blip-image-captioning-large"
_blip_cache: dict = {}


def _get_blip():
    """Load the BLIP processor and captioning model once; reuse on later calls."""
    if not _blip_cache:
        _blip_cache["processor"] = BlipProcessor.from_pretrained(_BLIP_CHECKPOINT)
        _blip_cache["model"] = BlipForConditionalGeneration.from_pretrained(
            _BLIP_CHECKPOINT
        )
    return _blip_cache["processor"], _blip_cache["model"]


def _knock_out_background(sticker):
    """Resize *sticker* to 256x256 and make its background transparent.

    The background colour is sampled at pixel (1, 1); every pixel whose RGB
    exactly matches that sample gets alpha 0, all other pixels are kept.
    """
    rgba = sticker.resize((256, 256)).convert("RGBA")
    background = rgba.getpixel((1, 1))[:3]
    pixels = [
        (r, g, b, 0) if (r, g, b) == background else (r, g, b, a)
        for (r, g, b, a) in rgba.getdata()
    ]
    out = Image.new("RGBA", rgba.size, (0, 0, 0, 0))
    out.putdata(pixels)
    return out


def display_giv(url):
    """Caption the image at *url*, generate sticker variants, and composite them.

    Downloads the thumbnail, produces an unconditional BLIP caption, asks
    the OpenAI Image API for five sticker-style renderings of that caption,
    knocks out each sticker's background, and pastes each one onto a copy of
    the original image.

    Parameters
    ----------
    url : str
        Direct URL of the thumbnail image to process.

    Returns
    -------
    list[PIL.Image.Image]
        Five RGBA composites, one per generated sticker.
    """
    # for GIV
    # url = f"https://api.momenti.tv/v4/media/moment_infos/{moment_info_id}/thumbnail"
    # BUG FIX: the original performed an extra, blocking `requests.get(url)`
    # here whose response was never used; the image is fetched once below.
    processor, model = _get_blip()
    raw_image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    # Unconditional image captioning: no text prompt, the model describes
    # the image on its own.
    inputs = processor(raw_image, return_tensors="pt")
    out = model.generate(**inputs)
    unconditional_caption = processor.decode(out[0], skip_special_tokens=True)

    # Placement is loop-invariant: horizontally centred then shifted 128px
    # left, and 128px above the bottom edge (original behaviour preserved).
    big_width, big_height = raw_image.size
    x_position = (big_width - 256) // 2 - 128
    y_position = big_height - 256 - 128

    gallery_list = []
    for sticker in gpt_image(unconditional_caption, 5):
        composed = raw_image.copy().convert("RGBA")
        overlay = _knock_out_background(sticker)
        # Use the overlay itself as the paste mask so transparent pixels
        # let the underlying image show through.
        composed.paste(overlay, (x_position, y_position), overlay)
        gallery_list.append(composed)
    return gallery_list
| if __name__ == "__main__": | |
| url = "https://api.momenti.tv/v4/media/moment_infos/1844984252/thumbnail" | |
| demo = gr.Interface( | |
| display_giv, | |
| "text", | |
| [ | |
| gr.Image(image_mode="RGBA"), | |
| gr.Image(image_mode="RGBA"), | |
| gr.Image(image_mode="RGBA"), | |
| gr.Image(image_mode="RGBA"), | |
| gr.Image(image_mode="RGBA"), | |
| ], | |
| examples=[[url]], | |
| ) | |
| demo.launch() | |