# Source header (Hugging Face Space file view): jagirl / app.py,
# uploaded by kechiro ("app.py update", commit d7211e7).
# Kept as a comment: the raw pasted header ("kechiro's picture") contained an
# unterminated string literal and made the file fail to parse.
"""
Jagirl Image Generator - Main Application
統合機能:
- Gradio UI フレームワーク
- aipicasso/jagirl モデルによる高品質画像生成
- 詳細パラメータ制御とログ機能
- Text-to-Image 対応
"""
# ZeroGPU support: use the real GPU decorator only when the `spaces` module
# is importable; everywhere else substitute a no-op stand-in.
try:
    import spaces  # type: ignore
    gpu_execution = spaces.GPU
except ImportError:
    # Local / non-ZeroGPU environments: the stand-in works both as
    # @gpu_execution (bare) and as @gpu_execution(...) (called with options).
    def gpu_execution(*args, **kwargs):
        called_bare = bool(args) and callable(args[0]) and not kwargs
        if called_bare:
            return args[0]
        return lambda func: func
import gradio as gr
import torch
from diffusers import (
StableDiffusionXLPipeline,
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
PNDMScheduler,
LMSDiscreteScheduler
)
from huggingface_hub import login
import os
import base64
from datetime import datetime
# 環境変数の読み込み(dotenvがあれば使用)
try:
from dotenv import load_dotenv
load_dotenv()
except ImportError:
pass # dotenvがない場合はスキップ
import random
import json
import logging
from pathlib import Path
import traceback
from PIL import Image
import time
# 統合ロガーのインポート
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), 'utils'))
from logger import get_logger, log_generation
# 標準ロガー設定
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# 定数定義
HISTORY_FILE = "logs/generation_history.json"
OUTPUT_DIR = "outputs"
MODEL_NAME = os.getenv("MODEL_NAME", "aipicasso/jagirl") # 環境変数で変更可能
# 統合ロガーインスタンス
unified_logger = get_logger("logs")
hf_authenticated = False
def ensure_hf_login():
    """Authenticate against the Hugging Face Hub at most once.

    Reads HF_TOKEN / HUGGINGFACEHUB_API_TOKEN from the environment. Whatever
    the outcome (success, failure, or no token at all), the attempt is
    recorded in the module-level ``hf_authenticated`` flag so later calls
    return immediately.
    """
    global hf_authenticated
    if hf_authenticated:
        return
    token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACEHUB_API_TOKEN")
    if not token:
        logger.warning("⚠️ Hugging Face Hub のトークンが設定されていません。公開モデルのみアクセス可能です")
    else:
        try:
            login(token=token, add_to_git_credential=False)
            logger.info("🔐 Hugging Face Hub に認証しました")
        except Exception as exc:
            logger.error("❌ Hugging Face Hub の認証に失敗しました: %s", exc)
    hf_authenticated = True
# グローバル変数でパイプラインを管理
txt2img_pipe = None
model_loaded = False
def setup_scheduler(pipe, scheduler_type="default"):
    """Build a scheduler for ``pipe`` from its current scheduler config.

    Args:
        pipe: StableDiffusionXLPipeline whose scheduler config is reused.
        scheduler_type: One of:
            - "default": keep the pipeline's current scheduler
            - "DDIM": high quality with few steps
            - "DPMSolver": fast and high quality (recommended)
            - "Euler": stable results
            - "EulerA": more varied results
            - "LMS": classic method (requires scipy)
            - "PNDM": pipeline default family
          Unknown values fall back to the current scheduler.

    Returns:
        The newly configured scheduler, or ``pipe.scheduler`` when
        ``scheduler_type`` is "default", unknown, or unavailable.
    """
    schedulers = {
        "DDIM": DDIMScheduler,
        "DPMSolver": DPMSolverMultistepScheduler,
        "Euler": EulerDiscreteScheduler,
        "EulerA": EulerAncestralDiscreteScheduler,
        "PNDM": PNDMScheduler,
        # LMS needs scipy at instantiation time; a missing dependency surfaces
        # as ImportError from ``from_config`` below and is handled there.
        # (The previous try/except around this assignment was dead code:
        # assigning an already-imported name can never raise ImportError.)
        "LMS": LMSDiscreteScheduler,
    }
    if scheduler_type != "default" and scheduler_type in schedulers:
        try:
            return schedulers[scheduler_type].from_config(pipe.scheduler.config)
        except ImportError as e:
            logger.warning(f"⚠️ {scheduler_type}スケジューラーが利用できません: {e}")
            return pipe.scheduler
    return pipe.scheduler
def setup_model():
    """Load and optimize the text-to-image pipeline (idempotent).

    Populates the module-level ``txt2img_pipe`` on first success and sets
    ``model_loaded``; subsequent calls return immediately.

    Returns:
        bool: True when the pipeline is ready, False on any failure
        (no CUDA device, download/authentication error, ...).
    """
    global txt2img_pipe, model_loaded
    if model_loaded:
        return True
    try:
        logger.info("🔧 モデルをセットアップ中...")
        ensure_hf_login()
        # This app is GPU-only: refuse to continue without CUDA.
        if not torch.cuda.is_available():
            logger.error("❌ CUDA が利用できません。GPUを確認してください。")
            return False
        device = "cuda"
        logger.info(f"✅ デバイス: {device}")
        # Text-to-image pipeline
        logger.info(f"📦 Text-to-Image パイプライン読み込み中: {MODEL_NAME}")
        txt2img_pipe = StableDiffusionXLPipeline.from_pretrained(
            MODEL_NAME,
            torch_dtype=torch.float16,
            use_safetensors=True
        ).to(device)
        # Re-assert FP16 after moving to the GPU; fall back silently to the
        # current dtype on failure. (Was a bare ``except:`` that could even
        # swallow KeyboardInterrupt — narrowed to Exception and logged.)
        try:
            txt2img_pipe = txt2img_pipe.to(dtype=torch.float16)
            logger.info("✅ FP16モードに変換")
        except Exception as exc:
            logger.warning("⚠️ FP16変換をスキップ、FP32で継続 (%s)", exc)
        # xFormers memory-efficient attention is best-effort: it is
        # unavailable on CPU-only PyTorch builds or without xformers installed.
        try:
            txt2img_pipe.enable_xformers_memory_efficient_attention()
            logger.info("✅ xFormers メモリ効率化を有効化")
        except Exception as e:
            logger.warning(f"⚠️ xFormers無効 (CPU版PyTorch使用中): {e}")
        # CPU offload intentionally disabled -- everything stays on the GPU.
        logger.info("🎯 GPU専用モードで動作")
        logger.info("✅ モデルセットアップ完了")
        model_loaded = True
        return True
    except Exception as e:
        logger.error(f"❌ モデルセットアップ失敗: {e}")
        return False
def log_generation_details(prompt, negative_prompt, params, output_filepath, execution_time):
    """Record one generation run through the unified logger.

    Args:
        prompt: Main prompt text.
        negative_prompt: Negative prompt text.
        params: Dict of generation parameters.
        output_filepath: Path of the saved output image.
        execution_time: Wall-clock generation time in seconds.

    Returns:
        The unique generation id, or ``None`` when recording failed.
    """
    try:
        record_id = unified_logger.log_generation(
            prompt=prompt,
            negative_prompt=negative_prompt,
            parameters=params,
            output_filepath=output_filepath,
            execution_time=execution_time,
        )
        logger.info(f"📝 生成ログを記録: {record_id}")
        return record_id
    except Exception as e:
        # Logging must never break generation: report and carry on.
        logger.error(f"❌ ログ記録失敗: {e}")
        traceback.print_exc()
        return None
def load_generation_history(limit=10):
    """Load the most recent generation records from ``HISTORY_FILE``.

    Supports both the unified-logger format (``{"generations": [...]}``)
    and the legacy format (a bare list of entries).

    Args:
        limit: Maximum number of records to return, newest last
            (previously hard-coded to 10; default keeps old behavior).

    Returns:
        list: Up to ``limit`` most recent entries; empty list when the file
        is missing, malformed, in an unknown format, or ``limit <= 0``.
    """
    if limit <= 0:
        return []
    try:
        if not os.path.exists(HISTORY_FILE):
            return []
        with open(HISTORY_FILE, 'r', encoding='utf-8') as f:
            data = json.load(f)
        if isinstance(data, dict) and 'generations' in data:
            # A negative-start slice already handles lists shorter than
            # ``limit`` — no explicit length check needed.
            return data['generations'][-limit:]
        if isinstance(data, list):
            # Legacy format: the file is a bare list.
            return data[-limit:]
        return []
    except Exception as e:
        logger.error(f"履歴読み込み失敗: {e}")
        return []
def format_history_display():
    """Render the recent generation history as Markdown for the UI panel."""
    entries = load_generation_history()
    if not entries:
        return "📝 生成履歴がありません"
    parts = ["## 📋 Recent Generation History (最新10件)\n"]
    # Newest entries first.
    for idx, entry in enumerate(reversed(entries), 1):
        params = entry.get('parameters', {})
        result = entry.get('result', {})
        prompt = entry.get('prompt', 'No prompt')
        # Truncate long prompts for readability.
        if len(prompt) > 50:
            prompt = prompt[:50] + "..."
        status = "✅ Success" if result.get('success', False) else "❌ Failed"
        exec_time = result.get('execution_time_seconds', 0)
        parts.append(f"### {idx}. {status}")
        parts.append(f"**ID:** {entry.get('generation_id', 'Unknown')}")
        parts.append(f"**Time:** {entry.get('timestamp', 'Unknown')}")
        parts.append(f"**Prompt:** {prompt}")
        parts.append(f"**Seed:** {params.get('seed', 'N/A')} | **Steps:** {params.get('num_inference_steps', 'N/A')}")
        parts.append(f"**Execution:** {exec_time:.1f}s")
        parts.append("---")
    return "\n".join(parts) + "\n"
def refresh_history():
    """UI callback: re-render and return the history Markdown."""
    latest = format_history_display()
    return latest
@gpu_execution()  # lets ZeroGPU detect that this function requests a GPU
def generate_txt2img(prompt, negative_prompt="", num_images=1, steps=25, guidance=7.5, size=1024, seed=None, scheduler="default"):
    """Generate images from text with full parameter control.

    Args:
        prompt: Main prompt. An empty/whitespace prompt returns [].
        negative_prompt: Negative prompt.
        num_images: Number of images to generate (was previously ignored;
            now passed through as ``num_images_per_prompt``).
        steps: Sampling steps (10-150).
        guidance: CFG scale / guidance strength (1-20).
        size: Square image size in pixels (e.g. 512, 768, 1024).
        seed: Seed value; ``None`` or any value <= 0 picks a random seed.
            (The UI sends -1 for "random"; the old check only caught
            None/0, so -1 was used as a literal seed.)
        scheduler: Scheduler type, see ``setup_scheduler``.

    Returns:
        list: Generated PIL images; empty list on failure or empty prompt.
    """
    global txt2img_pipe
    if not prompt.strip():
        return []
    if not model_loaded:
        if not setup_model():
            return []
    try:
        logger.info(f"🎨 画像生成開始: {prompt[:50]}...")
        start_time = time.time()
        # None, 0 and negative seeds all mean "randomize".
        if seed is None or seed <= 0:
            seed = random.randint(1, 2**32 - 1)
        generator = torch.Generator(device="cuda").manual_seed(seed)
        # Swap the scheduler for this call only; restored in ``finally`` so a
        # failed generation cannot leave the shared pipeline mis-configured.
        original_scheduler = txt2img_pipe.scheduler
        if scheduler != "default":
            txt2img_pipe.scheduler = setup_scheduler(txt2img_pipe, scheduler)
        params = {
            "prompt": prompt,
            "negative_prompt": negative_prompt,
            "num_inference_steps": int(steps),
            "guidance_scale": float(guidance),
            "width": int(size),
            "height": int(size),
            # Honor the requested image count (previously hard-coded to 1).
            "num_images_per_prompt": max(1, int(num_images)),
            "generator": generator
        }
        try:
            # No autocast here -- matches test_high_quality_generation.py.
            result = txt2img_pipe(**params)
        finally:
            if scheduler != "default":
                txt2img_pipe.scheduler = original_scheduler
        execution_time = time.time() - start_time
        # Save every generated image under outputs/.
        outputs_dir = Path("outputs")
        outputs_dir.mkdir(exist_ok=True)
        saved_paths = []
        for i, image in enumerate(result.images):
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"txt2img_{timestamp}_seed{seed}_{i+1}.png"
            filepath = outputs_dir / filename
            image.save(filepath)  # PNG is lossless; a JPEG quality hint is meaningless here
            saved_paths.append(str(filepath))
            logger.info(f"💾 画像保存: {filepath}")
        # Record the run via the unified logger.
        log_params = {
            "num_inference_steps": int(steps),
            "guidance_scale": float(guidance),
            "width": int(size),
            "height": int(size),
            "seed": seed,
            "scheduler_type": scheduler,
            "num_images": num_images,
            "torch_dtype": "float16",
            "mode": "txt2img"
        }
        log_generation_details(
            prompt=prompt,
            negative_prompt=negative_prompt,
            params=log_params,
            output_filepath=saved_paths[0] if saved_paths else "",
            execution_time=execution_time
        )
        logger.info(f"✅ 生成完了: {execution_time:.2f}秒, {len(result.images)}枚")
        return result.images
    except Exception as e:
        logger.error(f"❌ 画像生成失敗: {e}")
        logger.error(traceback.format_exc())
        return []
def create_gradio_app():
    """Build the Gradio Blocks UI and wire it to the generation backend.

    Loads optional logo / icon / sample-image assets from ``assets/``
    (all best-effort: missing files only produce warnings), injects the
    custom CSS theme, and binds the generate button to
    ``generate_txt2img`` through a single-image wrapper.

    Returns:
        The constructed ``gr.Blocks`` application (not yet launched).
    """
    # Custom color object (TV Asahi Blue) -- kept commented out for reference.
    # custom_blue = gr.themes.Color(
    #     c50="#f0f4ff",
    #     c100="#dbeafe",
    #     c200="#bfdbfe",
    #     c300="#93c5fd",
    #     c400="#60a5fa",
    #     c500="#284baf",  # main color
    #     c600="#1e40af",
    #     c700="#1d4ed8",
    #     c800="#1e3a8a",
    #     c900="#1e3a8a",
    #     c950="#172554"
    # )
    custom_css = """
    body,
    .gradio-container {
      --range-color: #f97316;
      background-color: #f6f8ff;
      background-image:
        url("data:image/svg+xml,%3Csvg%20width%3D%22160%22%20height%3D%22160%22%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%3E%3Crect%20width%3D%22160%22%20height%3D%22160%22%20fill%3D%22transparent%22%2F%3E%3Cline%20x1%3D%2278%22%20y1%3D%2280%22%20x2%3D%2282%22%20y2%3D%2280%22%20stroke%3D%22rgba%2842%2C42%2C42%2C0.5%29%22%20stroke-width%3D%221.5%22%20stroke-linecap%3D%22round%22%2F%3E%3Cline%20x1%3D%2280%22%20y1%3D%2278%22%20x2%3D%2280%22%20y2%3D%2282%22%20stroke%3D%22rgba%2842%2C42%2C42%2C0.5%29%22%20stroke-width%3D%221.5%22%20stroke-linecap%3D%22round%22%2F%3E%3C%2Fsvg%3E"),
        linear-gradient(90deg, rgba(42, 42, 42, 0.1) 0px, rgba(42, 42, 42, 0.1) 1px, transparent 1px, transparent 40px),
        linear-gradient(0deg, rgba(42, 42, 42, 0.1) 0px, rgba(42, 42, 42, 0.1) 1px, transparent 1px, transparent 40px),
        radial-gradient(circle at 10% 10%, rgba(11, 213, 126, 0.2) 0%, rgba(11, 213, 126, 0) 20%),
        linear-gradient(135deg,
          rgba(240, 244, 255, 0.4) 0%,
          rgba(230, 240, 255, 0.2) 25%,
          rgba(220, 235, 255, 0.1) 50%,
          rgba(200, 220, 255, 0.15) 75%,
          rgba(220, 200, 255, 0.3) 100%
        );
      background-size:
        160px 160px,
        40px 40px,
        40px 40px,
        100% 100%,
        100% 100%;
      background-position: 0 0, 0 0, 0 0, 0 0, 0 0;
      background-repeat: repeat, repeat, repeat, no-repeat, no-repeat;
      background-attachment: fixed;
    }
    /* 主要パネルの透過感を維持 */
    .gradio-container .gradio-block {
      backdrop-filter: blur(4px);
    }
    .logo-banner {
      position: fixed;
      top: 16px;
      left: 16px;
      z-index: 5;
      margin: 0;
      padding: 0;
    }
    .logo-banner svg,
    .logo-banner img {
      display: block;
      width: auto;
      height: auto;
    }
    .gradio-container input[type="range"] {
      accent-color: #f97316;
    }
    .gradio-container input[type="range"]::-webkit-slider-thumb {
      background-color: #f97316;
    }
    .gradio-container input[type="range"]::-moz-range-thumb {
      background-color: #f97316;
    }
    .card-panel {
      border-radius: 20px;
      background: rgba(255, 255, 255, 0.82);
      box-shadow: 0 20px 45px rgba(15, 23, 42, 0.12);
      padding: 24px;
      border: 1px solid rgba(255, 255, 255, 0.65);
      backdrop-filter: blur(10px);
      overflow: hidden;
    }
    .card-panel > * {
      width: 100%;
    }
    .card-panel details {
      background: transparent;
      border: none;
      box-shadow: none;
    }
    .card-panel details > summary {
      font-weight: 600;
    }
    .card-panel .gradio-image {
      background: transparent;
      border: none;
      box-shadow: none;
    }
    .card-panel .gradio-image img {
      border-radius: 16px;
    }
    .sample-thumb-row {
      display: flex;
      gap: 16px;
      width: 100%;
      flex-wrap: wrap;
    }
    .sample-thumb {
      border-radius: 16px;
      overflow: hidden;
      box-shadow: 0 20px 45px rgba(15, 23, 42, 0.12);
      border: 1px solid rgba(148, 163, 184, 0.55);
      background: rgba(255, 255, 255, 0.92);
      padding: 0 !important;
      position: relative;
    }
    .sample-thumb img {
      width: 100%;
      height: 100%;
      object-fit: cover;
    }
    .sample-thumb button[aria-label="Fullscreen"] {
      position: absolute;
      top: 12px;
      right: 16px;
      z-index: 5;
    }
    .generate-btn button {
      display: inline-flex;
      align-items: center;
      justify-content: center;
      gap: 8px;
      min-height: 62px; /* 約1.2倍の縦幅 */
      padding: 18px 36px;
      border-radius: 12px;
      background: linear-gradient(135deg, #fb923c 0%, #f97316 45%, #ea580c 100%);
      color: #ffffff;
      font-size: 1.05rem;
      font-weight: 600;
      letter-spacing: 0.02em;
      border: 1px solid rgba(249, 115, 22, 0.5);
      box-shadow: 0 20px 45px rgba(15, 23, 42, 0.12);
      transition: transform 0.25s ease, box-shadow 0.25s ease, letter-spacing 0.25s ease, background 0.25s ease;
      transform: scale(1);
      cursor: pointer;
      will-change: transform;
      background-size: 120% 120%;
    }
    .generate-btn button:hover,
    .generate-btn:hover button {
      transform: scale(1.08) !important;
      letter-spacing: 0.08em;
      box-shadow: 0 24px 50px rgba(15, 23, 42, 0.16);
      background-position: 100% 0;
    }
    .generate-btn button:active,
    .generate-btn:active button {
      transform: scale(0.96) !important;
      letter-spacing: 0.03em;
      box-shadow: 0 16px 36px rgba(15, 23, 42, 0.14);
    }
    .generate-btn button:focus-visible {
      outline: 2px solid rgba(249, 115, 22, 0.65);
      outline-offset: 3px;
    }
    .dark .generate-btn button {
      color: #1b1b1f;
    }
    .contain-fullscreen button[aria-label*="Close"],
    .contain-fullscreen button[aria-label*="close"],
    .contain-fullscreen button[aria-label*="Exit"],
    .contain-fullscreen button[aria-label*="exit"],
    .contain-fullscreen button[aria-label*="閉じる"] {
      margin-right: 18px;
      margin-top: 10px;
    }
    .model-title {
      display: inline-flex;
      align-items: center;
      gap: 12px;
      margin: 32px 0 16px;
    }
    .model-icon {
      width: 88px;
      height: 88px;
      border-radius: 12px;
      object-fit: cover;
      box-shadow: 0 12px 24px rgba(15, 23, 42, 0.12);
      background: rgba(255, 255, 255, 0.85);
    }
    .model-title-text {
      display: flex;
      flex-direction: column;
      align-items: flex-start;
      gap: 6px;
    }
    .model-name {
      font-size: 2.4rem;
      font-weight: 600;
    }
    .model-link {
      display: inline-flex;
      align-items: center;
      gap: 4px;
      color: #1f2937;
      font-weight: 500;
      text-decoration: none;
    }
    .model-link .link-icon {
      font-size: 1.2rem;
      opacity: 0.75;
    }
    .model-link:hover {
      text-decoration: underline;
    }
    """
    # Load the banner logo SVG (optional; the UI still works without it).
    logo_svg_html = ""
    logo_svg_path = Path(__file__).parent / "assets"/ "images" / "logo" / "logo_ai_picasso.svg"
    try:
        logo_svg_html = logo_svg_path.read_text(encoding="utf-8")
    except FileNotFoundError:
        logger.warning("⚠️ ロゴSVGが見つかりません: %s", logo_svg_path)
    except Exception as exc:
        logger.warning("⚠️ ロゴSVG読み込みに失敗しました: %s", exc)
    # Collect sample thumbnails, pre-computing display sizes scaled to a
    # fixed 240px height while preserving each image's aspect ratio.
    sample_image_names = ["girl1.jpg", "txt2img_20251020_183621_seed1982019515_1.png"]
    sample_images = []
    sample_dir = Path(__file__).parent / "assets" / "images" / "samples"
    for name in sample_image_names:
        sample_path = sample_dir / name
        if sample_path.exists():
            try:
                with Image.open(sample_path) as img:
                    width, height = img.size
            except Exception as exc:
                # Fall back to a square placeholder size if the file is unreadable.
                logger.warning("⚠️ サンプル画像の読み込みに失敗しました: %s (%s)", sample_path, exc)
                width, height = (240, 240)
            target_height = 240
            scaled_width = max(1, int(round((target_height / height) * width))) if height else 240
            sample_images.append({
                "path": str(sample_path),
                "width": scaled_width,
                "height": target_height
            })
        else:
            logger.warning("⚠️ サンプル画像が見つかりません: %s", sample_path)
    # Build the main UI.
    with gr.Blocks(
        title="Jagirl",
        theme=gr.themes.Default(),
        css=custom_css
    ) as demo:
        if logo_svg_html:
            gr.HTML(f"<div class='logo-banner'>{logo_svg_html}</div>")
        # Model icon rendered inline as a base64 data URI (best-effort).
        icon_path = Path(__file__).parent / "assets" / "images" / "icon" / "ai_picasso_icon.svg"
        icon_html = ""
        try:
            icon_bytes = icon_path.read_bytes()
            icon_b64 = base64.b64encode(icon_bytes).decode("ascii")
            icon_html = f"<img src='data:image/svg+xml;base64,{icon_b64}' alt='Jagirl Icon' class='model-icon' />"
        except FileNotFoundError:
            logger.warning("⚠️ モデルアイコンが見つかりません: %s", icon_path)
        except Exception as exc:
            logger.warning("⚠️ モデルアイコン読み込みに失敗しました: %s", exc)
        title_text_html = (
            "<div class='model-title-text'>"
            "<span class='model-name'>Jagirl</span>"
            "<a class='model-link' href='https://huggingface.co/aipicasso/jagirl' target='_blank' rel='noopener noreferrer'>"
            "<span class='link-icon'>&#128279;</span><span>https://huggingface.co/aipicasso/jagirl</span>"
            "</a>"
            "</div>"
        )
        gr.HTML(
            f"<div class='model-title'>{icon_html}{title_text_html}</div>"
        )
        with gr.Row():
            # Left column: prompt inputs and generation settings.
            with gr.Column(scale=2):
                with gr.Group(elem_classes=["card-panel"]):
                    txt_prompt = gr.Textbox(
                        label="Prompt / プロンプト",
                        placeholder="Enter your prompt | xxmixgirl, 1girl, black hair, brown eyes, face, shibuya background, sunny day",
                        lines=3,
                        max_lines=5
                    )
                    txt_negative_prompt = gr.Textbox(
                        label="Negative Prompt / ネガティブプロンプト",
                        # Default negative prompt tuned for illustration/character generation.
                        value=(
                            "(worst quality, low quality:1.4), (illustration, 3d, 2d, painting, cartoons, sketch:1.3),"
                            " (monochrome, grayscale:1.2), teeth, open mouth, (bad hands, bad fingers, deformed hands,"
                            " mutated fingers:1.3), watermark, signature, text, logo, extra limbs, malformed limbs,"
                            " poorly drawn face, poorly drawn hands, mutation, deformed, bad anatomy, bad proportions,"
                            " duplicate, cropped, jpeg artifacts, blurry, out of focus, oversaturated, artificial lighting"
                        ),
                        lines=3,
                        max_lines=5
                    )
                with gr.Group(elem_classes=["card-panel"]):
                    with gr.Accordion("Advanced Settings / 詳細設定", open=True):
                        txt_step = gr.Slider(
                            minimum=10, maximum=150, value=25, step=5,
                            label="Sampling Steps / サンプリングステップ数 (推奨: 20-40)"
                        )
                        txt_guidance = gr.Slider(
                            minimum=3.0, maximum=15.0, value=7.5, step=0.5,
                            label="CFG Scale / ガイダンス強度 (推奨: 7-10)"
                        )
                        # Image size is fixed at 1024x1024 (not exposed in the UI).
                        # Example supported resolutions: 512x512, 768x768, 1024x1024, 1280x1280, 1536x1536
                        txt_size = 1024  # fixed value
                        txt_seed = gr.Number(
                            label="Seed (空欄でランダム)",
                            value=-1,
                            precision=0
                        )
                        txt_scheduler = gr.Dropdown(
                            choices=["default", "DDIM", "DPMSolver", "Euler", "EulerA", "LMS", "PNDM"],
                            value="default",
                            label="Scheduler / スケジューラー (推奨: DPMSolver)"
                        )
                txt_generate_btn = gr.Button(
                    "🎨 画像生成開始",
                    variant="primary",
                    size="lg",
                    elem_classes=["generate-btn"]
                )
            # Right column: generated image output and sample thumbnails.
            with gr.Column(scale=3):
                with gr.Group(elem_classes=["card-panel"]):
                    txt_gallery = gr.Image(
                        label="Generated Image / 生成された画像",
                        type="pil",
                        interactive=False,
                        show_label=True,
                        show_download_button=True,
                        container=True,
                        height=None,
                        width=None
                    )
                if sample_images:
                    gr.Markdown("## Samples")
                    with gr.Row(elem_classes=["sample-thumb-row"]):
                        for info in sample_images:
                            gr.Image(
                                value=info["path"],
                                interactive=False,
                                type="filepath",
                                show_label=False,
                                show_download_button=False,
                                show_fullscreen_button=True,
                                elem_classes=["sample-thumb"],
                                height=info["height"],
                                width=info["width"]
                            )
        # Single-image wrapper for the click handler (prevents double invocation).
        def generate_single_image(prompt, neg_prompt, step, guidance, seed, scheduler):
            result = generate_txt2img(prompt, neg_prompt, 1, step, guidance, txt_size, seed, scheduler)
            return result[0] if result else None
        # Event binding.
        txt_generate_btn.click(
            fn=generate_single_image,
            inputs=[txt_prompt, txt_negative_prompt, txt_step, txt_guidance, txt_seed, txt_scheduler],
            outputs=txt_gallery,
            show_progress=True
        )
    return demo
def main():
    """Application entry point: prepare directories, build the UI, launch."""
    logger.info("🚀 Jagirl Image Generator 起動中...")
    # Guarantee the output/log directories exist before anything writes there.
    for dirname in ("outputs", "logs"):
        Path(dirname).mkdir(exist_ok=True)
    demo = create_gradio_app()
    # Queued execution is more stable on shared-GPU hosts such as ZeroGPU
    # (Gradio 5 needs no arguments here).
    demo.queue()
    # Detect a Hugging Face Space via the SPACE_ID environment variable.
    if os.getenv("SPACE_ID") is not None:
        # The Space health check connects from outside, so bind to 0.0.0.0;
        # the port arrives via the PORT env var (default 7860). No browser
        # can be opened there, and SSR would spawn an unneeded Node.js process.
        launch_kwargs = dict(
            server_name="0.0.0.0",
            server_port=int(os.getenv("PORT", 7860)),
            inbrowser=False,
            ssr_mode=False,
        )
    else:
        # Local run: listen on localhost and open the browser automatically.
        launch_kwargs = dict(
            server_name="127.0.0.1",
            server_port=7860,
            inbrowser=True,
            ssr_mode=True,  # may be set to False if SSR is not wanted
        )
    logger.info("🌐 Webアプリケーションを起動...")
    demo.launch(
        share=False,
        show_error=True,
        quiet=False,
        **launch_kwargs,
    )
if __name__ == "__main__":
main()