"""Gradio app: generate images from text prompts via Hugging Face inference providers.

Exposes two text-to-image backends (SDXL via hf-inference, FLUX.1-dev via the
Nebius provider) and serves a simple prompt-to-image web UI for the second one.
"""
import os

import gradio as gr
from huggingface_hub import InferenceClient

# API token read from the environment; None when unset (the client will then
# fail on the first inference call — confirm the secret name matches the deploy).
my_key = os.getenv("my_write_token")

# Client for the Hugging Face serverless ("hf-inference") provider.
hf_client = InferenceClient(
    provider="hf-inference",
    api_key=my_key,
)


def generate_image_model_001(prompt):
    """Generate an image from *prompt* with Stable Diffusion XL.

    Returns a PIL.Image object (per InferenceClient.text_to_image).
    Currently not wired into the UI; kept as an alternate backend.
    """
    return hf_client.text_to_image(
        prompt,
        model="stabilityai/stable-diffusion-xl-base-1.0",
    )


# Client routed through the Nebius provider.
# NOTE(review): the name says "together" but the provider is "nebius" —
# name kept unchanged for backward compatibility with any external callers.
together_client = InferenceClient(
    provider="nebius",
    api_key=my_key,
)


def generate_image_model_002(prompt):
    """Generate an image from *prompt* with FLUX.1-dev (Nebius provider).

    Returns a PIL.Image object (per InferenceClient.text_to_image).
    """
    return together_client.text_to_image(
        prompt,
        model="black-forest-labs/FLUX.1-dev",
    )


# The UI calls generate_image_model_002 (FLUX.1-dev). The previous title and
# description said "Stable Diffusion", which did not match the wired-up model —
# fixed to describe what the app actually runs.
my_interface = gr.Interface(
    fn=generate_image_model_002,
    inputs=gr.Textbox(label="Enter your prompt"),
    outputs=gr.Image(type="pil", label="Generated Image"),
    title="FLUX.1-dev Image Generation",
    description="Generate images from text prompts using FLUX.1-dev.",
)

if __name__ == "__main__":
    # Guarded so importing this module does not start a server.
    my_interface.launch(share=True)