# protan_test / app.py
import gradio as gr
import numpy as np
import random
import spaces  # provides the @spaces.GPU decorator used on ZeroGPU Spaces
from diffusers import DiffusionPipeline
import torch
import os
from huggingface_hub import login
from openai import OpenAI
# Initialize API keys and login
hf_token = os.getenv("space_token")
openai_api_key = os.getenv("openai_apikey")
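# Sanity check: both secrets must be configured (e.g., as Space secrets);
# failing fast here is clearer than a 401 from either API later.
if not hf_token or not openai_api_key:
    raise RuntimeError("Set the space_token and openai_apikey environment variables.")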
login(token=hf_token)
def make_prompt(place):
    client = OpenAI(api_key=openai_api_key)
    messages = [
        {
            "role": "system",
            "content": """Based on the place_info the user enters, list three English words.
An image of a man will be generated from those words. The word order is as follows:
place, action being performed, appearance of the background
ex: The ocean, swimming, with sharks
##output format:
place_hoge, moving_hoge, background_hoge"""
        },
        {"role": "user", "content": "place_info: nature"},
        {"role": "assistant", "content": "forest, exploration, there are tigers"},
        {"role": "user", "content": "place_info: " + place}
    ]
    response = client.chat.completions.create(
        model="gpt-4",
        messages=messages,
        temperature=1,
    )
    # The assistant is expected to return something like "mountains, hiking, clear sky"
    generated_content = response.choices[0].message.content.strip()
    prompt = "Purotan, short brown hair, bright smile, " + generated_content
    return prompt
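# Illustrative only: given the few-shot example above, make_prompt("nature")
# might return "Purotan, short brown hair, bright smile, forest, exploration,
# there are tigers" (actual output varies between calls at temperature=1).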
# Set device and load model
device = "cuda" if torch.cuda.is_available() else "cpu"
model_repo_id = "black-forest-labs/FLUX.1-dev" # Replace with your model
if torch.cuda.is_available():
    torch_dtype = torch.float16  # note: FLUX.1 is commonly run in bfloat16 where supported
else:
    torch_dtype = torch.float32
pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
pipe = pipe.to(device)
pipe.load_lora_weights("purotan_1750.safetensors")
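# Optional memory saver (a sketch, assuming VRAM is tight): diffusers can
# offload idle sub-models to the CPU at some cost in speed. If enabled,
# drop the explicit pipe.to(device) call above.
# pipe.enable_model_cpu_offload()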
# Constants
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1536  # must be at least the default 1280 height used by the slider below
# Define the image generation function
@spaces.GPU  # requests a GPU for this call when running on a ZeroGPU Space
def generate_image(place, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    prompt = make_prompt(place)
    generator = torch.Generator().manual_seed(seed)
    image = pipe(
        prompt=prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator
    ).images[0]
    return image, seed
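# Quick local smoke test (hypothetical values; run manually, not on import):
# image, used_seed = generate_image("nature", seed=0, randomize_seed=True,
#                                   width=720, height=1280,
#                                   guidance_scale=3.5, num_inference_steps=20)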
# CSS for styling
css = """
#col-container {
    margin: 0 auto;
    max-width: 640px;
}
"""
# Define the Gradio interface
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        # Title plus Japanese instruction text:
        # "Press a button to choose a place, then generate an image!"
        gr.Markdown("""
# Text-to-Image Gradio Template
ボタンを押して場所を選択し、画像を生成してください!
""")
        # Place Selection Buttons
        with gr.Row():
            btn_nature = gr.Button("自然")  # "Nature"
            btn_cityscape = gr.Button("都市景観")  # "Cityscape"
            btn_fantasy = gr.Button("ファンタジー世界")  # "Fantasy world"
            btn_daily = gr.Button("日常生活")  # "Daily life"
            btn_space = gr.Button("宇宙")  # "Space"
        # Display Selected Place ("Selected place: Nature")
        selected_place_display = gr.Markdown("**選択された場所:** 自然")
        # Run Button
        run_button = gr.Button("Run", scale=0)
        # Image Output
        result = gr.Image(label="Result", show_label=False)
# Advanced Settings Accordion
with gr.Accordion("Advanced Settings", open=False):
seed = gr.Slider(
label="Seed",
minimum=0,
maximum=MAX_SEED,
step=1,
value=0,
)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
with gr.Row():
width = gr.Slider(
label="Width",
minimum=256,
maximum=MAX_IMAGE_SIZE,
step=32,
value=720, # Adjust based on your model's capabilities
)
height = gr.Slider(
label="Height",
minimum=256,
maximum=MAX_IMAGE_SIZE,
step=32,
value=1280, # Adjust based on your model's capabilities
)
with gr.Row():
guidance_scale = gr.Slider(
label="Guidance scale",
minimum=0.0,
maximum=10.0,
step=0.1,
value=3.5, # Adjust based on your model's capabilities
)
num_inference_steps = gr.Slider(
label="Number of inference steps",
minimum=1,
maximum=50,
step=1,
value=20, # Adjust based on your model's capabilities
)
    # Removed the gr.Examples section to fix the ValueError
    # If you wish to add examples, ensure they align with the input components
    # State to keep track of the selected place; defaults to "自然" ("Nature")
    selected_place = gr.State("自然")
    # Set the selected place and update the display ("Selected place: <place>")
    def set_place(place):
        return place, f"**選択された場所:** {place}"
    # Connect the buttons to the state setter via lambdas
    btn_nature.click(fn=lambda: set_place("自然"), outputs=[selected_place, selected_place_display])
    btn_cityscape.click(fn=lambda: set_place("都市景観"), outputs=[selected_place, selected_place_display])
    btn_fantasy.click(fn=lambda: set_place("ファンタジー世界"), outputs=[selected_place, selected_place_display])
    btn_daily.click(fn=lambda: set_place("日常生活"), outputs=[selected_place, selected_place_display])
    btn_space.click(fn=lambda: set_place("宇宙"), outputs=[selected_place, selected_place_display])
    # Connect the Run button to the image generation function
    run_button.click(
        fn=generate_image,
        inputs=[
            selected_place,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps
        ],
        outputs=[result, seed]
    )
demo.queue().launch()