import os

import gradio as gr
import spaces
import torch
from diffusers import FluxPipeline
from huggingface_hub import login
from transformers import AutoProcessor, MllamaForConditionalGeneration
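
# Fabric, pattern, and textile-design options offered as radio choices in the Gradio UI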
fabrics = ['cotton', 'silk', 'denim', 'linen', 'polyester', 'wool', 'velvet']
patterns = ['striped', 'floral', 'geometric', 'abstract', 'solid', 'polka dots']
textile_designs = ['woven texture', 'embroidery', 'printed fabric', 'hand-dyed', 'quilting']

# Log in to the Hugging Face Hub with a token from the environment (required for the gated models below)
hf_token = os.getenv('HF_AUTH_TOKEN')
if not hf_token:
    raise ValueError("Hugging Face token is not set in the environment variables.")
login(token=hf_token)
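
# Vision-language model that generates a caption for the uploaded image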
model_id = "meta-llama/Llama-3.2-11B-Vision-Instruct"

model = MllamaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
processor = AutoProcessor.from_pretrained(model_id)
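
# FLUX.1-dev text-to-image pipeline that renders the final textile design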
pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
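
# Move the diffusion pipeline to the GPU when one is available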
device = "cuda" if torch.cuda.is_available() else "cpu"
# The captioning model is already placed by device_map="auto"; only the
# diffusion pipeline needs an explicit move.
pipe.to(device)
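
# Caption the uploaded image, build a prompt from the caption and the selected
# attributes, and generate a textile-design image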
@spaces.GPU(duration=150)
def generate_caption_and_image(image, f, p, d):
    if image is not None and f is not None and p is not None and d is not None:
        img = image.convert("RGB")

        # Describe the uploaded image with the vision-language model
        messages = [{
            "role": "user",
            "content": [
                {"type": "image"},
                {"type": "text", "text": "If I had to write a haiku for this one, it would be: "},
            ],
        }]
        input_text = processor.apply_chat_template(messages, add_generation_prompt=True)
        inputs = processor(img, input_text, add_special_tokens=False, return_tensors="pt").to(model.device)

        output = model.generate(**inputs, max_new_tokens=30)
        # Decode only the newly generated tokens so the caption excludes the chat prompt
        caption = processor.decode(output[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)

        # NOTE: `prompt` was never defined in the original code; this template combining
        # the caption with the selected fabric, pattern, and design is an assumed reconstruction.
        prompt = f"{caption}. A textile design on {f} fabric with a {p} pattern, featuring {d}."

        image = pipe(
            prompt,
            height=1024,
            width=1024,
            guidance_scale=3.5,
            num_inference_steps=50,
            max_sequence_length=512,
            generator=torch.Generator("cpu").manual_seed(0),
        ).images[0]
        return image
    return None
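
# Gradio UI: an image upload plus fabric/pattern/design selectors, returning the generated design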
iface = gr.Interface(
    fn=generate_caption_and_image,
    inputs=[
        gr.Image(type="pil", label="Upload Image"),
        gr.Radio(fabrics, label="Select Fabric"),
        gr.Radio(patterns, label="Select Pattern"),
        gr.Radio(textile_designs, label="Select Textile Design"),
    ],
    outputs=[gr.Image(label="Generated Design 1")],
    live=True,
)
iface.launch(share=True)