import os
import random

import gradio as gr
import spaces
import torch
from PIL import Image

from huggingface_hub import login
from transformers import BlipProcessor, BlipForConditionalGeneration
from diffusers import DiffusionPipeline
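
# Authenticate with the Hugging Face Hub; FLUX.1-dev is a gated model, so a valid token is required.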
hf_token = os.getenv('HF_AUTH_TOKEN')
if not hf_token:
    raise ValueError("Hugging Face token is not set in the environment variables.")
login(token=hf_token)
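
# Load two image-captioning models: BLIP-large and the FuseCap BLIP variant.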
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
processor1 = BlipProcessor.from_pretrained("noamrot/FuseCap")
model2 = BlipForConditionalGeneration.from_pretrained("noamrot/FuseCap")
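
# Load the FLUX.1-dev text-to-image pipeline and apply the outfit-generator LoRA.
# bfloat16 is assumed here so the ~12B-parameter model fits in a single GPU's memory;
# if memory is still tight, diffusers' pipe3.enable_model_cpu_offload() is an alternative to .to(device).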
pipe3 = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
)
pipe3.load_lora_weights("tryonlabs/FLUX.1-dev-LoRA-Outfit-Generator")
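
# Place all three models on the GPU when one is available.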
device = "cuda" if torch.cuda.is_available() else "cpu"

model2.to(device)
model.to(device)
pipe3.to(device)
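

# Caption the uploaded image with both models, build a text-to-image prompt, and render
# a new outfit design. @spaces.GPU allots a ZeroGPU slot for up to 150 seconds per call.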
@spaces.GPU(duration=150)
def generate_caption_and_image(image):
    img = image.convert("RGB")
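
    # Randomly pick a fabric, pattern, and textile design to vary each generated outfit.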
    fabrics = ['cotton', 'silk', 'denim', 'linen', 'polyester', 'wool', 'velvet']
    patterns = ['striped', 'floral', 'geometric', 'abstract', 'solid', 'polka dots']
    textile_designs = ['woven texture', 'embroidery', 'printed fabric', 'hand-dyed', 'quilting']

    selected_fabric = random.choice(fabrics)
    selected_pattern = random.choice(patterns)
    selected_textile_design = random.choice(textile_designs)
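
    # First caption: BLIP-large, conditioned on the prefix "a picture of ".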
| text = "a picture of " |
| inputs = processor(img, text, return_tensors="pt").to(device) |
| |
| out = model.generate(**inputs, num_beams = 3) |
| |
| |
|
|
| caption2 = processor.decode(out[0], skip_special_tokens=True) |
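
    # Second caption: the FuseCap variant, which yields denser, more detailed descriptions.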
    inputs = processor1(img, text, return_tensors="pt").to(device)
    out = model2.generate(**inputs, num_beams=3)
    caption1 = processor1.decode(out[0], skip_special_tokens=True)
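
    # Blend both captions with the sampled attributes into a single generation prompt.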
    prompt = f'''Create a highly realistic clothing item based on the following descriptions: The design should reflect {caption1} and {caption2}, blending both themes into a single, stylish, and modern piece of clothing. Incorporate highly realistic and high-quality textures that exude sophistication, with realistic fabric lighting and fine details. Subtly hint at {selected_fabric}, featuring a {selected_pattern} motif and a {selected_textile_design} style that harmoniously balances the essence of both captions.'''
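
    # Run the FLUX pipeline on the assembled prompt; this is the slow, GPU-bound step.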
    generated_image = pipe3(prompt).images[0]

    return prompt, generated_image
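

# Gradio UI: upload an image, get back the generated prompt and the rendered design.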
iface = gr.Interface(
    fn=generate_caption_and_image,
    inputs=gr.Image(type="pil", label="Upload Image"),
    outputs=[gr.Textbox(label="Generated Prompt"), gr.Image(label="Generated Design")],
    live=True,
)
iface.launch(share=True)