# NOTE(review): lines above this file's code were web-scrape residue from the
# Hugging Face Spaces page (status "Sleeping", blob size 1,148 bytes, commit
# hashes, and a line-number gutter). They were not Python and would have been
# syntax errors; preserved here as a comment instead.
import gradio as gr
from huggingface_hub import InferenceClient
# import requests
# import io
# from PIL import Image
# needed a change
import os
# Read the Hugging Face write token from the environment (None if unset).
my_key = os.getenv('my_write_token')

# Client for the default Hugging Face inference provider.
first_client = InferenceClient(
    provider="hf-inference",
    api_key=my_key,
)
# output is a PIL.Image object
def generate_image_model_001(prompt):
    """Generate a PIL.Image from *prompt* using SDXL on the hf-inference provider."""
    return first_client.text_to_image(
        prompt,
        model="stabilityai/stable-diffusion-xl-base-1.0",
    )
# second model definitions here
# Client for the Nebius inference provider, reusing the same token as above.
second_client = InferenceClient(
    provider="nebius",
    api_key=my_key,
)
def generate_image_model_002(prompt, suffix=". Add three old south asian cyclists in the background. Two of them are wearing helmets and one is not."):
    """Generate a PIL.Image from *prompt* using FLUX.1-dev on the Nebius provider.

    The scene-dressing text previously hard-coded into the prompt is now the
    ``suffix`` parameter; its default reproduces the original behavior exactly,
    and callers can pass ``suffix=""`` (or any other text) to change it.

    Parameters:
        prompt: The user's text prompt.
        suffix: Extra text appended verbatim to the prompt before generation.

    Returns:
        The generated image (a PIL.Image object, per InferenceClient.text_to_image).
    """
    image = second_client.text_to_image(
        prompt + suffix,
        model="black-forest-labs/FLUX.1-dev",
    )
    return image
# Gradio UI: a single textbox prompt in, a PIL image out.
# NOTE(review): only generate_image_model_002 is wired up here;
# generate_image_model_001 is defined above but unused — confirm intent.
my_interface=gr.Interface(
    fn=generate_image_model_002,
    inputs=gr.Textbox(label="Enter your prompt"),
    outputs=gr.Image(type="pil", label="Generated Image"),
    title="Image Generation",
    description="Generate images from text prompts",
)
# NOTE(review): share=True only matters for local runs; presumably ignored when
# hosted on HF Spaces — verify against Gradio's launch() docs.
my_interface.launch(share=True)