"""Gradio demo: generate images from text prompts via Hugging Face inference providers."""

import os

import gradio as gr
from huggingface_hub import InferenceClient

# API token comes from the environment, never hard-coded.
# NOTE(review): the env-var name suggests a *write* token; text-to-image only
# needs inference/read scope — consider a narrower token.
my_key = os.getenv("my_write_token")

# Client for the default Hugging Face inference provider (used by model 001).
first_client = InferenceClient(
    provider="hf-inference",
    api_key=my_key,
)


def generate_image_model_001(prompt):
    """Generate an image for *prompt* with Stable Diffusion XL base 1.0.

    Returns the PIL.Image produced by InferenceClient.text_to_image.
    """
    return first_client.text_to_image(
        prompt,
        model="stabilityai/stable-diffusion-xl-base-1.0",
    )


# Client for the Nebius inference provider (used by model 002).
second_client = InferenceClient(
    provider="nebius",
    api_key=my_key,
)


def generate_image_model_002(prompt):
    """Generate an image for *prompt* with FLUX.1-dev on Nebius.

    The user prompt is augmented with a fixed scene description before it is
    sent to the model. Returns the PIL.Image produced by text_to_image.
    """
    return second_client.text_to_image(
        prompt + ". Add three old south asian cyclists in the background. Two of them are wearing helmets and one is not.",
        model="black-forest-labs/FLUX.1-dev",
    )


# The UI is wired to model 002; generate_image_model_001 is kept available
# for callers/experiments that still reference it.
my_interface = gr.Interface(
    fn=generate_image_model_002,
    inputs=gr.Textbox(label="Enter your prompt"),
    outputs=gr.Image(type="pil", label="Generated Image"),
    title="Image Generation",
    description="Generate images from text prompts",
)

if __name__ == "__main__":
    # share=True exposes a temporary public tunnel URL — fine for a demo,
    # review before using in any production setting.
    my_interface.launch(share=True)