# image_generator / app.py
# Hugging Face Space by jamesthong (commit 14e4303, verified)
import os

import gradio as gr
from huggingface_hub import InferenceClient, login

# Authenticate against the Hugging Face Hub; the token is expected in the
# Space's secrets as HUGGINGFACEHUB_API_TOKEN.
login(os.getenv("HUGGINGFACEHUB_API_TOKEN"))

# Chat model used to turn the user's idea into a refined image prompt.
repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"
def image_generator(text_input, style):
    """Refine a user idea into an image prompt via an LLM, then render it.

    Args:
        text_input: Free-text description of what the image should contain.
        style: Style adjective (e.g. "fun" or "interesting") injected into
            the prompt-engineering request.

    Returns:
        tuple: ``(prompt, image)`` — the LLM-refined prompt string and the
        image produced from it by the text-to-image endpoint.
    """
    # No placeholders here, so a plain string literal (the original used a
    # needless f-string).
    system_input = "You are an expert prompt engineer with artistic flair. "
    user_input = (
        f"Write a concise prompt for a {style} image containing {text_input}. "
        "Only return the prompt."
    )
    messages = [
        {"role": "system", "content": system_input},
        {"role": "user", "content": user_input},
    ]

    # Step 1: ask the chat model for a refined image prompt.
    llm_client = InferenceClient(repo_id)
    chat_completion = llm_client.chat_completion(
        messages=messages,
        max_tokens=500,
    )
    prompt = chat_completion.choices[0].message.content

    # Step 2: render the refined prompt with SDXL. A separate client is used
    # here (the original rebound the same name); the fixed seed keeps results
    # reproducible across calls.
    image_client = InferenceClient()
    image = image_client.text_to_image(
        prompt=prompt,
        model="stabilityai/stable-diffusion-xl-base-1.0",
        guidance_scale=8,
        seed=42,
    )
    return prompt, image
# UI layout: left column holds inputs, outputs, and buttons; right column
# wires the button callback and supplies clickable examples.
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(label="Prompt")
            # Label added: the original Radio rendered without one.
            style = gr.Radio(["fun", "interesting"], label="Style")
            prompt = gr.Textbox(interactive=False, visible=True, label="Refined prompt")
            output_image = gr.Image(interactive=False, label="Result")
            with gr.Row():
                reset = gr.ClearButton([input_text])
                submit = gr.Button("Submit")
        with gr.Column():
            submit.click(
                fn=image_generator,
                inputs=[input_text, style],
                outputs=[prompt, output_image],
            )
            # NOTE(review): the original had a trailing comma after the
            # gr.Examples(...) call, silently binding `examples` to a
            # 1-tuple; removed.
            examples = gr.Examples(
                examples=[
                    ["a llama and a cookbook", "fun"],
                    ["a squirrel", "interesting"],
                ],
                inputs=[input_text, style],
            )

if __name__ == "__main__":
    demo.launch()