# NOTE(review): the lines previously here were HuggingFace Spaces page metadata
# (status badges, file size, git blame hashes) — web-scrape residue, not source code.
import os
import json
import logging
import random
import re
from typing import List, Dict, Any, Optional, Tuple

import gradio as gr
# Requires google.generativeai >= 0.3.1
import google.generativeai as genai
from dotenv import load_dotenv

# Load environment variables (GEMINI_API_KEY) from a local .env file.
load_dotenv()

# ------------------- Logging setup -------------------
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# ------------------- Gemini API setup -------------------
# NOTE(review): log messages below were mojibake-garbled Korean in the source
# and have been reconstructed in English.
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY", "")
if not GEMINI_API_KEY:
    logger.error("Gemini API key is not set in the .env file. Translation and prompt generation may not work.")
else:
    try:
        genai.configure(api_key=GEMINI_API_KEY)
        logger.info("Gemini API key configured successfully.")
    except Exception as e:
        logger.error(f"Error while configuring the Gemini API key: {e}")
        GEMINI_API_KEY = ""  # Disable the key on configuration failure
# ------------------- Reference data loading -------------------
CLOTHING_REFERENCES_PATH = 'clothing_references.json'  # Adjust to the real file path if needed
# List of reference scenarios (style/setting/pose/camera info) sampled at prompt time.
clothing_references: List[Dict[str, Any]] = []
try:
    # Explicit UTF-8 encoding for the JSON file.
    with open(CLOTHING_REFERENCES_PATH, 'r', encoding='utf-8') as f:
        clothing_references = json.load(f).get("clothing_references", [])
    if not clothing_references:
        logger.warning(f"Key 'clothing_references' missing or empty in {CLOTHING_REFERENCES_PATH}.")
    else:
        logger.info(f"Loaded {len(clothing_references)} clothing references successfully.")
except FileNotFoundError:
    logger.error(f"Reference file ({CLOTHING_REFERENCES_PATH}) not found. Running without references.")
    clothing_references = []
except json.JSONDecodeError:
    logger.error(f"Reference file ({CLOTHING_REFERENCES_PATH}) is malformed. JSON parsing failed.")
    clothing_references = []
except Exception as e:
    logger.error(f"Unexpected error while loading the reference file: {e}")
    clothing_references = []
# ------------------- Gemini-based translation function (new) -------------------
def translate_with_gemini(text_to_translate: str) -> str:
    """Translate Korean text into English using the Gemini API.

    Falls back to returning the input unchanged when no API key is configured
    or the API call fails; returns "" for empty input.
    """
    if not text_to_translate:
        return ""
    if not GEMINI_API_KEY:
        logger.warning("No Gemini API key; skipping translation and returning the original text.")
        return text_to_translate
    try:
        model = genai.GenerativeModel('gemini-2.0-flash')
        prompt = f"""Translate the following Korean text into natural-sounding English.
Respond ONLY with the translated English text and nothing else.
Korean Text:
{text_to_translate}
English Translation:"""
        response = model.generate_content(
            prompt,
            generation_config=genai.types.GenerationConfig(
                temperature=0.2,
                # Rough output budget: ~3 tokens per source character — TODO confirm this is enough.
                max_output_tokens=len(text_to_translate) * 3
            )
        )
        translated_text = response.text.strip()
        translated_text = translated_text.strip('"')  # Gemini sometimes wraps the answer in quotes
        logger.info(f"Translation succeeded: '{text_to_translate}' -> '{translated_text}' (Model: gemini-2.0-flash)")
        return translated_text
    except Exception as e:
        logger.error(f"Gemini translation error ('{text_to_translate}') with gemini-2.0-flash: {e}. Returning the original text.")
        return text_to_translate
# ------------------- Gemini prompt generation (uses references) -------------------
def generate_prompt_with_gemini(person_description_en, item_description_2_en, item_description_3_en, custom_prompt_en, reference: Optional[Dict[str, Any]]):
    """Generate an enhanced Midjourney prompt with Gemini, optionally guided by a random reference scenario.

    Returns the generated prompt string, or an error string on failure.
    NOTE(review): error strings keep the sentinel substrings "μ€λ₯:" / "β οΈ"
    (mojibake in this source) because generate_final_prompt checks for them.
    """
    if not GEMINI_API_KEY:
        logger.error("Gemini API key is not configured.")
        return "μ€λ₯: Cannot generate a prompt without a Gemini API key."
    try:
        # System instruction fixes the output contract: exact input echoes,
        # one English paragraph, and a fixed trailing parameter string.
        system_instruction = f"""
You are an expert prompt generator for virtual fashion try-on using an AI image generator like Midjourney.
Your goal is to create ONE highly detailed and realistic prompt in natural English that visually describes a complete scene, incorporating elements from a provided reference scenario.
Your response MUST include the exact input texts for the person and items in the following format:
- The person description MUST appear as: "({person_description_en} from image #1)"
- The first item description MUST appear as: "({item_description_2_en} from image #2)"
- The second item description MUST appear as: "({item_description_3_en} from image #3)"
The prompt should describe a specific lifestyle scene inspired by the reference details (setting, background, pose, camera angle, style).
Critically, ensure the model's face from (image #1) is **exactly preserved, clear, sharp, and realistically integrated** into the scene.
The final image should look like a high-quality photograph.
Return ONLY the prompt as one complete English paragraph.
Always end the prompt with the following parameters, maintaining the specified seed: --ar 9:16 --face #1 --seed 123456 --q 3 --v 5.2 --style raw
"""
        prompt_person = f"({person_description_en} from image #1)"
        prompt_items = f"({item_description_2_en} from image #2) and ({item_description_3_en} from image #3)"
        reference_details = ""
        if reference:
            reference_details = f"""
Reference Scenario Details to inspire the scene:
- Style: {reference.get('style', 'N/A')}
- Setting: {reference.get('setting', 'N/A')}
- Background: {reference.get('background', 'N/A')}
- Pose: {reference.get('pose', 'N/A')}
- Camera Angle: {reference.get('camera_angle', 'standard view')}
- Lens Type: {reference.get('lens_type', 'standard lens')}
- Focus/Light: {reference.get('focus_style', 'standard focus and light')}
"""
        prompt_request = f"""
Create a detailed Midjourney prompt for a virtual fitting:
- The person is: {prompt_person}.
- They are wearing: {prompt_items}.
- Additional user request: "{custom_prompt_en if custom_prompt_en else 'None'}"
{reference_details}
IMPORTANT INSTRUCTIONS:
- Generate ONE single paragraph prompt in natural, descriptive English.
- Describe a specific, realistic lifestyle scene incorporating the reference details.
- Emphasize **exact preservation, clarity, and sharpness of the face** from image #1.
- The output should resemble a high-quality photograph.
- Do NOT explain anything. Do NOT include bullet points or extra text.
- Ensure the prompt ends ONLY with: --ar 9:16 --face #1 --seed 123456 --q 3 --v 5.2 --style raw
"""
        model = genai.GenerativeModel(
            'gemini-2.0-flash',
            system_instruction=system_instruction
        )
        logger.info("Requesting prompt generation from Gemini (Model: gemini-2.0-flash)")
        response = model.generate_content(
            prompt_request,
            generation_config=genai.types.GenerationConfig(
                temperature=0.7,
                top_p=0.95,
                top_k=40,
                max_output_tokens=8192  # TODO confirm against the model's actual maximum
            )
        )
        logger.info("Gemini response received (prompt generation)")
        try:
            enhanced_prompt = response.text.strip()
        except AttributeError as ae:
            # Some response shapes expose text only through candidates/parts.
            logger.warning(f"No .text attribute on the Gemini response: {ae}. Inspect the response object structure.")
            try:
                if response.candidates:
                    enhanced_prompt = response.candidates[0].content.parts[0].text.strip()
                else:
                    enhanced_prompt = "β οΈ Failed to parse the Gemini response (no candidates)"
            except Exception as e:
                logger.warning(f"Fallback parsing of the Gemini response failed: {str(e)}. Response: {response}")
                enhanced_prompt = "β οΈ Fallback parsing of the Gemini response failed"
        except Exception as e:
            logger.error(f"Unexpected error while parsing the Gemini response: {str(e)}. Response: {response}")
            enhanced_prompt = "β οΈ Error while parsing the Gemini response"
        # Enforce the fixed trailing parameter string, replacing any variant Gemini emitted.
        required_params = "--ar 9:16 --face #1 --seed 123456 --q 3 --v 5.2 --style raw"
        if not enhanced_prompt.endswith(required_params):
            prompt_base = re.sub(r'--ar\s+\S+\s+--face\s+\S+\s+--seed\s+\d+\s+--q\s+\d+(\.\d+)?\s+--v\s+\S+\s+--style\s+\S+$', '', enhanced_prompt).strip()
            enhanced_prompt = f"{prompt_base} {required_params}"
        enhanced_prompt = filter_prompt_only(enhanced_prompt)
        logger.info(f"Gemini-generated prompt (after filtering): {enhanced_prompt}")
        return enhanced_prompt
    except Exception as e:
        logger.exception("Fatal error during Gemini prompt generation:")
        return f"μ€λ₯: Gemini prompt generation failed ({str(e)}). Falling back to the basic prompt."
def filter_prompt_only(prompt: str) -> str:
    """Strip Gemini chatter from a generated prompt and normalize its tail.

    Removes code fences, known lead-in phrases and trailing sign-off lines,
    then guarantees the prompt ends with the fixed Midjourney parameter string.
    """
    prompt = prompt.strip()
    # Unwrap a fenced code block, keeping only its contents.
    prompt = re.sub(r"```[a-zA-Z]*\n(.*?)\n```", r"\1", prompt, flags=re.DOTALL)
    prompt = prompt.strip('`')
    start_phrases = [
        "Here's the generated prompt:", "Here is the prompt:", "Okay, here's the prompt:",
        "Enhanced prompt:", "Generated prompt:", "Prompt:", "Here's an enhanced prompt:",
        "Here is the improved prompt:", "I've refined the prompt:", "Below is the prompt:",
        "The enhanced prompt is:"
    ]
    end_phrases = [
        "I hope this helps!", "Let me know if you need adjustments.", "Enjoy generating!",
        "This prompt aims to fulfill all requirements."
    ]
    # Drop the first matching lead-in phrase (case-insensitive).
    for phrase in start_phrases:
        if prompt.lower().startswith(phrase.lower()):
            prompt = prompt[len(phrase):].lstrip(':').strip()
            break
    # Drop a trailing sign-off line, if present.
    prompt_lines = prompt.split('\n')
    if len(prompt_lines) > 1:
        last_line = prompt_lines[-1].strip()
        for phrase in end_phrases:
            if last_line.startswith(phrase):
                prompt = '\n'.join(prompt_lines[:-1]).strip()
                break
    required_params = "--ar 9:16 --face #1 --seed 123456 --q 3 --v 5.2 --style raw"
    if required_params in prompt:
        # Cut off anything appended after the parameter string.
        base_prompt = prompt.split(required_params)[0].strip()
        prompt = f"{base_prompt} {required_params}"
    else:
        # (Original `elif not prompt.endswith(...)` was always true here — simplified to else.)
        logger.warning("Required parameters missing from the Gemini output; appending them.")
        prompt = f"{prompt.strip()} {required_params}"
    return prompt.strip()
# ------------------- Basic prompt builder (fallback when Gemini fails) -------------------
def generate_basic_prompt(person_description_ko, item_description_2_ko, item_description_3_ko, custom_prompt_ko):
    """Build a simple fallback prompt directly from the (Korean) user inputs.

    Used when Gemini prompt generation fails; each field is still translated
    individually via translate_with_gemini (which returns the original text on error).
    """
    person_en = translate_with_gemini(person_description_ko)
    item2_en = translate_with_gemini(item_description_2_ko)
    item3_en = translate_with_gemini(item_description_3_ko)
    custom_en = translate_with_gemini(custom_prompt_ko)
    combined_item_description = f"({item2_en} from image #2) and ({item3_en} from image #3)"
    base_prompt = (f"Hyperrealistic lifestyle portrait of a ({person_en} from image #1) wearing "
                   f"{combined_item_description}. "
                   f"Her face is exactly preserved from (image #1)")
    if custom_en.strip():
        base_prompt += f", {custom_en.strip()}"
    # Fixed parameter tail, including the constant seed for reproducibility.
    base_prompt += " --ar 9:16 --face #1 --seed 123456 --q 3 --v 5.2 --style raw"
    logger.info(f"Basic prompt generated: {base_prompt}")
    return base_prompt
# ------------------- Final prompt generation -------------------
def generate_final_prompt(model_image, item_image_2, item_image_3, person_description_ko, item_description_2_ko, item_description_3_ko, custom_prompt_ko):
    """Validate the inputs, translate the Korean descriptions, pick a random
    reference scenario, and produce the final Midjourney prompt (Gemini first,
    generate_basic_prompt as fallback).

    NOTE(review): user-facing messages were mojibake-garbled in the source and
    have been reconstructed in English; the "μ€λ₯:" prefix (mojibake in this
    source) is kept because it is the error sentinel used throughout this file.
    """
    if not model_image or not item_image_2 or not item_image_3:
        return "μ€λ₯: Please upload the model image (#1) and both item images (#2, #3)."
    if not person_description_ko or not item_description_2_ko or not item_description_3_ko:
        return "μ€λ₯: Please enter the person description and both item descriptions."
    if not GEMINI_API_KEY:
        logger.error("Cannot proceed without a Gemini API key. Check the .env file.")
        return "μ€λ₯: The Gemini API key is not configured; a prompt cannot be generated."
    logger.info("Translating the Korean descriptions to English (Model: gemini-2.0-flash)...")
    translated_person = translate_with_gemini(person_description_ko)
    translated_item_2 = translate_with_gemini(item_description_2_ko)
    translated_item_3 = translate_with_gemini(item_description_3_ko)
    translated_custom = translate_with_gemini(custom_prompt_ko)
    logger.info("Translation finished (original text is used on error).")
    if translated_person == person_description_ko and person_description_ko:
        # Unchanged output usually means the translation failed or the input was already English.
        logger.warning(f"Person description ({person_description_ko}) translation failed or was already English.")
    selected_reference = None
    if clothing_references:
        selected_reference = random.choice(clothing_references)
        logger.info(f"Selected reference ID: {selected_reference.get('id', 'N/A')}, style: {selected_reference.get('style', 'N/A')}")
    else:
        logger.warning("No references available; proceeding without one.")
    try:
        generated_prompt = generate_prompt_with_gemini(
            translated_person,
            translated_item_2,
            translated_item_3,
            translated_custom,
            selected_reference
        )
        # The sentinels "μ€λ₯:" / "β οΈ" mark error strings from generate_prompt_with_gemini —
        # keep these in sync with that function.
        if "μ€λ₯:" in generated_prompt or "β οΈ" in generated_prompt:
            logger.warning(f"Gemini prompt generation failed or contained an error: {generated_prompt}. Using the basic prompt.")
            return generate_basic_prompt(person_description_ko, item_description_2_ko, item_description_3_ko, custom_prompt_ko)
        else:
            return generated_prompt
    except Exception:
        logger.exception("Exception during final prompt generation:")
        return generate_basic_prompt(person_description_ko, item_description_2_ko, item_description_3_ko, custom_prompt_ko)
# ------------------- Gradio interface -------------------
def create_app():
    """Build the Gradio Blocks UI: three image uploads plus Korean description
    fields on the left, the generated Midjourney prompt on the right.

    NOTE(review): all user-facing strings in this function were mojibake-garbled
    in the source; they have been reconstructed in English — confirm wording and
    the exact layout (custom prompt / button placement) against the original app.
    """
    with gr.Blocks(title="Virtual Fitting Studio") as demo:
        gr.Markdown("# Virtual Fitting Studio")
        gr.Markdown("""
Input section on the left, output section on the right.
- **Image upload:** upload the #1 (person), #2 (item) and #3 (item) images.
- **Descriptions:** enter a description of each image in **Korean** (e.g. a smiling young woman, a beige coat, jeans). Gemini translates them into English.
- **Custom description:** optionally describe the desired background, pose, mood, etc. in **Korean**.
- **Prompt generation:** the button translates the descriptions, combines them with a random reference, and builds a Midjourney prompt.
- **Fixed seed:** every prompt includes `--seed 123456` for consistency.
- **References:** background, camera shot, etc. are sampled from `clothing_references.json`.
- **Note:** translation and prompt generation use the Gemini API (gemini-2.0-flash), so an API key is required and processing may take a moment.
""")
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("## Input")
                with gr.Row():
                    model_image = gr.Image(label="Model image (#1)", type="pil", sources=["upload"])
                    item_image_2 = gr.Image(label="Item image (#2)", type="pil", sources=["upload"])
                    item_image_3 = gr.Image(label="Item image (#3)", type="pil", sources=["upload"])
                with gr.Row():
                    person_description_ko = gr.Textbox(
                        label="Person description (#1) (Korean)",
                        placeholder="e.g. a smiling young woman",
                        lines=1, interactive=True
                    )
                    item_description_2_ko = gr.Textbox(
                        label="Item description (#2) (Korean)",
                        placeholder="e.g. a beige coat",
                        lines=1, interactive=True
                    )
                    item_description_3_ko = gr.Textbox(
                        label="Item description (#3) (Korean)",
                        placeholder="e.g. light-wash jeans",
                        lines=1, interactive=True
                    )
                custom_prompt_ko = gr.Textbox(
                    label="Custom scene description (Korean, optional)",
                    placeholder="e.g. holding a coffee on a Paris street, warm afternoon light",
                    lines=2, interactive=True
                )
                prompt_btn = gr.Button("Generate prompt (includes translation)", variant="primary")
            with gr.Column(scale=1):
                gr.Markdown("## Output")
                prompt_output = gr.Textbox(
                    label="Generated Midjourney prompt (English)",
                    lines=15,
                    interactive=False
                )
        prompt_btn.click(
            fn=generate_final_prompt,
            inputs=[model_image, item_image_2, item_image_3, person_description_ko, item_description_2_ko, item_description_3_ko, custom_prompt_ko],
            outputs=[prompt_output]
        )
    return demo
if __name__ == "__main__":
    # Startup diagnostics, then launch the Gradio app with request queuing enabled.
    if not GEMINI_API_KEY:
        print("WARNING: the Gemini API key is not set. Translation and prompt generation may be limited.")
    # Verify the model name against Google AI Studio or the API documentation.
    print("Gemini model in use: gemini-2.0-flash (verify it is available for your API key)")
    if not clothing_references:
        print("WARNING: clothing_references.json could not be loaded or is empty. Running without references.")
    else:
        print(f"Starting the app with {len(clothing_references)} references.")
    app = create_app()
    app.queue()
    app.launch()