import gradio as gr
import torch
import random
import numpy as np
import datetime
# 履歴保存
from huggingface_hub import HfApi
from huggingface_hub import login
import os
# Read the Hugging Face token explicitly from the HF_TOKEN environment variable.
hf_token_value = os.getenv("HF_TOKEN")
if hf_token_value:
    api = HfApi(token=hf_token_value)
    print("token ok.")
else:
    # Warn and fall back to an unauthenticated client when no token is set.
    print("HF_TOKEN error")
    api = HfApi()  # initialized without a token

# Repository ID the (private) images are uploaded to
HF_REPO_ID = "cocoat/images"
# Repository ID the public images are uploaded to
PUBLIC_REPO_ID = "cocoat/opendata"
# Directory inside the Space used to store generated images
SPACE_IMAGE_DIR = "generated_images"
os.makedirs(SPACE_IMAGE_DIR, exist_ok=True)
# Image directory inside the public repository
PUBLIC_IMAGE_DIR = "generated_images"
os.makedirs(PUBLIC_IMAGE_DIR, exist_ok=True)
# Path of the history file inside the dataset repo
HISTORY_FILE = "history/generation_history_coamixXL3.txt"
# `os` was already imported at the top of the file (the duplicate import is
# removed); `requests` is used by load_history() below.
import requests
def load_history():
    """Load the generation history from the Hub's raw history file.

    Each history-file line has the form ``<path_in_repo>|||<caption>``.
    Returns a list of at most 10 tuples:
    ``(path_in_private_repo, caption, public_image_url)``.
    On any network/parse error an empty (or partial) list is returned.
    """
    history_data = []
    hf_raw_file_url = f"https://huggingface.co/datasets/{HF_REPO_ID}/raw/main/{HISTORY_FILE}"
    headers = {}
    if hf_token_value:
        headers["Authorization"] = f"Bearer {hf_token_value}"
    try:
        # Timeout added so a stalled Hub request cannot hang Space startup.
        response = requests.get(hf_raw_file_url, headers=headers, timeout=30)
        response.raise_for_status()
        loaded_hub_paths = set()  # guards against loading duplicate entries
        for line in response.text.splitlines():
            parts = line.strip().split("|||")
            if len(parts) == 2:
                image_path_in_repo, caption = parts
                # Bug fix: the set was populated but never consulted, so
                # duplicates were loaded anyway. Skip already-seen paths.
                if image_path_in_repo in loaded_hub_paths:
                    continue
                # Build the public-repository image URL for gallery display.
                hub_image_url = f"https://huggingface.co/datasets/{PUBLIC_REPO_ID}/resolve/main/{image_path_in_repo}"
                history_data.append((image_path_in_repo, caption, hub_image_url))
                loaded_hub_paths.add(image_path_in_repo)
        print(f"History loaded from Hub and matched with Space images: {len(history_data)} entries.")
    except requests.exceptions.RequestException as e:
        print(f"Error loading history from Hub via raw URL: {e}. Starting with empty history.")
    except Exception as e:
        print(f"An unexpected error occurred while parsing history: {e}. Starting with empty history.")
    return history_data[:10]
# Load the generation history once at startup; `history` is the shared,
# module-level list mutated by infer() and read by the UI callbacks.
history = load_history()
from PIL import Image
from diffusers import (
StableDiffusionXLPipeline,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler
)
from huggingface_hub import hf_hub_download, HfApi
# Device and dtype selection: fp16 on CUDA, fp32 on CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
MAX_SEED = np.iinfo(np.int32).max
MAX_SIZE = 2048
# Download the model checkpoint (single safetensors file) from the Hub.
model_path = hf_hub_download(
repo_id="cocoat/cocoamix",
filename="recocoamixXL3_coamixXL3.safetensors"
)
# Build the SDXL pipeline directly from the single checkpoint file.
pipe = StableDiffusionXLPipeline.from_single_file(
model_path,
torch_dtype=torch_dtype,
use_safetensors=True
).to(device)
# Scheduler setup: Euler Ancestral (with Karras sigmas) is the default;
# infer() swaps in dpm_scheduler when "DPM++ 2M SDE" is selected.
euler_scheduler = EulerAncestralDiscreteScheduler.from_config(
pipe.scheduler.config,
use_karras_sigmas=True
)
dpm_scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.scheduler = euler_scheduler
def upload_image_to_hub(image_pil, prompt_text, filename):
    """Upload a generated image to the public dataset repo.

    Saves *image_pil* to a local temp file, uploads it under
    PUBLIC_IMAGE_DIR in PUBLIC_REPO_ID, prunes public PNGs beyond the
    newest 10, and returns ``(uploaded_file_url, path_in_repo)``.
    Returns ``(None, None)`` on upload failure.
    """
    # Local temp path; `filename` already carries a timestamp.
    # (Fixes the garbled f-strings that contained a literal placeholder.)
    filepath = f"temp_{filename}"
    image_pil.save(filepath)
    try:
        # path_in_repo places the file inside a directory of the repo.
        path_in_repo = f"{PUBLIC_IMAGE_DIR}/{filename}"
        api.upload_file(
            path_or_fileobj=filepath,
            path_in_repo=path_in_repo,
            repo_id=PUBLIC_REPO_ID,  # public repository
            repo_type="dataset",
        )
        # Build the URL of the uploaded file (uses PUBLIC_REPO_ID).
        uploaded_file_url = f"https://huggingface.co/datasets/{PUBLIC_REPO_ID}/resolve/main/{path_in_repo}"
        print(f"Uploaded {filepath} to {uploaded_file_url}")
        # Prune old files: keep only the 10 newest PNGs under PUBLIC_IMAGE_DIR.
        current_files = api.list_repo_files(repo_id=PUBLIC_REPO_ID, repo_type="dataset")
        # Timestamped names sort newest-first with reverse=True.
        generated_images_in_public = sorted(
            [f for f in current_files if f.startswith(PUBLIC_IMAGE_DIR) and f.endswith('.png')],
            reverse=True,
        )
        if len(generated_images_in_public) > 10:
            for file_to_delete in generated_images_in_public[10:]:
                try:
                    api.delete_file(
                        path_in_repo=file_to_delete,
                        repo_id=PUBLIC_REPO_ID,
                        repo_type="dataset",
                        commit_message=f"Delete old image: {file_to_delete}"
                    )
                    print(f"Deleted old public image: {file_to_delete}")
                except Exception as del_e:
                    print(f"Error deleting old public image {file_to_delete}: {del_e}")
        return uploaded_file_url, path_in_repo
    except Exception as e:
        print(f"Error uploading image to Hub: {e}")
        return None, None
    finally:
        # Remove the local temp file (previously leaked via `finally: pass`).
        if os.path.exists(filepath):
            os.remove(filepath)
def upload_image_to_private_hub(image_pil, prompt_text, filename):
    """Upload the image to the private dataset repo (HF_REPO_ID).

    Returns the repo-internal path recorded in the history file,
    or ``None`` on failure.
    """
    # Fixes the garbled f-strings that contained a literal placeholder.
    filepath = f"temp_private_{filename}"
    image_pil.save(filepath)
    try:
        path_in_repo = f"generated_images/{filename}"
        api.upload_file(
            path_or_fileobj=filepath,
            path_in_repo=path_in_repo,
            repo_id=HF_REPO_ID,  # private repository
            repo_type="dataset",
        )
        print(f"Uploaded {filepath} to private Hub: {path_in_repo}")
        return path_in_repo  # repo-internal path stored in the history file
    except Exception as e:
        print(f"Error uploading image to private Hub: {e}")
        return None
    finally:
        # Remove the local temp file (previously leaked via `finally: pass`).
        if os.path.exists(filepath):
            os.remove(filepath)
def make_html_table(caption):
    """Render a caption as an HTML table.

    *caption* is a sequence of "Key: value" rows joined either by real
    newlines or by the "|-|" separator used in the history file. Rows
    without ": " are skipped.

    NOTE(review): the original HTML tags were stripped in transit; this
    rebuilds a plain two-column table — confirm the intended markup
    (classes/inline styles) against the deployed Space.
    """
    formatted_caption = caption.replace("|-|", "\n")
    rows = formatted_caption.split("\n")
    html = '<table>\n'
    for row in rows:
        if ": " in row:
            key, val = row.split(": ", 1)  # split only on the first ": "
            html += (
                f'<tr><td>{key}</td>'
                f'<td>{val}</td></tr>\n'
            )
    html += '</table>\n'
    return html
def create_dummy_image(width=512, height=512, alpha=0):
    """Return an RGBA placeholder image (black, transparent by default)."""
    size = (width, height)
    fill = (0, 0, 0, alpha)
    return Image.new("RGBA", size, fill)
def update_history_tables_on_select(evt: gr.SelectData):
    """Gallery select handler: return (caption table HTML, image URL).

    Returns ("", None) when the selection index is missing or out of
    range for the module-level `history` list.
    """
    idx = evt.index
    if idx is None or not (0 <= idx < len(history)):
        return "", None
    _, selected_caption, selected_image_url = history[idx]
    return make_html_table(selected_caption), selected_image_url
def update_history():
    """Concatenate an HTML caption table for every history entry.

    NOTE(review): wrapper markup around each table was lost in transit;
    plain concatenation is assumed — confirm against the original layout.
    """
    tables_html = "".join(
        make_html_table(item[1])
        for item in history
    )
    return tables_html
def infer(prompt, neg, seed, rand, w, h, cfg, steps, scheduler_type,
          progress=gr.Progress(track_tqdm=True)):
    """Generate one SDXL image, upload it to the Hub, and update history.

    Returns ``(image, gallery_items, latest_caption_table_html)`` for the
    ``[img_out, history_gallery, history_tables]`` outputs.
    """
    timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    filename = f"image_{timestamp}.png"
    # Local temp path for the generated image (fixes the garbled f-string).
    filepath = f"temp_{filename}"
    try:
        # `gc` is imported at module level further down the file; that
        # import runs at module import time, before infer() is ever called.
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        if rand:
            seed = random.randint(0, MAX_SEED)
        generator = torch.Generator(device=device).manual_seed(seed)
        pipe.scheduler = euler_scheduler if scheduler_type == "Euler Ancestral" else dpm_scheduler
        pipe.scheduler.set_timesteps(steps)

        def _callback(pipeline, step_idx, timestep, callback_kwargs):
            # Report per-step progress to the Gradio progress bar.
            progress(step_idx / steps, desc=f"Step {step_idx}/{steps}")
            return callback_kwargs

        output = pipe(
            prompt=prompt,
            negative_prompt=neg or None,
            guidance_scale=cfg,
            num_inference_steps=steps,
            width=w,
            height=h,
            generator=generator,
            callback_on_step_end=_callback
        )
        img = output.images[0]
        img.save(filepath)
        caption_text = (
            f"Prompt: {prompt}\n"
            f"Negative: {neg or 'None'}\n"
            f"Seed: {seed}\n"
            f"Size: {w}×{h}\n"
            f"CFG: {cfg}\n"
            f"Steps: {steps}\n"
            f"Scheduler: {scheduler_type}"
        )
        # History file stores one line per entry; "|-|" replaces newlines.
        caption_text_for_history = caption_text.replace("\n", "|-|").strip()
        # Upload to the public repo (for gallery display) ...
        uploaded_image_url, path_in_public_repo_for_history = upload_image_to_hub(img, caption_text, filename)
        # ... and to the private repo (for the persisted log).
        path_in_private_repo_for_history = upload_image_to_private_hub(img, caption_text, filename)
        # Update the shared history. Entries are stored as
        # (private repo path, caption, public repo URL), newest first.
        global history
        if path_in_private_repo_for_history and uploaded_image_url:
            history.insert(0, (path_in_private_repo_for_history, caption_text_for_history, uploaded_image_url))
        else:
            # Only add to history when both Hub uploads succeeded.
            print("Skipping history update due to failed Hub upload.")
        history_max_items = 10
        if len(history) > history_max_items:
            history.pop()
        # Rewrite the history file locally and push it to the Hub.
        temp_history_filepath = "temp_history.txt"
        with open(temp_history_filepath, "w", encoding="utf-8") as f:
            for img_path_in_repo, cap_text, _ in history:
                f.write(f"{img_path_in_repo}|||{cap_text}\n")
        try:
            api.upload_file(
                path_or_fileobj=temp_history_filepath,
                path_in_repo=HISTORY_FILE,
                repo_id=HF_REPO_ID,
                repo_type="dataset",
            )
            print(f"History file '{HISTORY_FILE}' updated on Hugging Face Hub.")
        except Exception as e:
            print(f"Error updating history file on Hub: {e}")
        finally:
            if os.path.exists(temp_history_filepath):
                os.remove(temp_history_filepath)
        progress(1.0, desc="Done!")
        # Gallery items use the public Hub URLs with readable captions.
        gallery_items = [(item[2], item[1].replace("|-|", "\n")) for item in history]
        processed_img, processed_gallery_items = process_image(img, gallery_items)
        if processed_img is None or processed_gallery_items is None:
            print("Image processing failed, skipping history update.")
            # NOTE(review): returning the component objects relies on them
            # existing as module globals at call time (they are defined later
            # in the file) — consider gr.update() instead; verify intent.
            return None, history_gallery, history_tables
        latest_caption_table = make_html_table(caption_text)
        return processed_img, processed_gallery_items, latest_caption_table
    finally:
        # Always remove the local temp image file.
        if os.path.exists(filepath):
            os.remove(filepath)
            print(f"生成画像の一時ファイル {filepath} を削除しました。")
import gc
import torch
def process_image(img, gallery_items):
    """Free Python/CUDA memory, then pass the inputs through unchanged.

    Returns ``(img, gallery_items)`` on success, or ``(None, None)``
    when a RuntimeError (e.g. CUDA out of memory) is raised.
    """
    try:
        gc.collect()
        if torch.cuda.is_available():
            # Release PyTorch's cached GPU memory when a GPU is in use.
            torch.cuda.empty_cache()
    except RuntimeError as e:
        # Distinguish OOM from other runtime failures for the server logs.
        error_message = f"error in generate: {e}\n\n"
        error_message += "memory error" if "CUDA out of memory" in str(e) else "other error"
        print(error_message)
        return None, None
    return img, gallery_items
# CSS settings (prevent forced dark mode + cafe-style theme).
# Fix: the font-family fallback used curly quotes (‘Georgia’), which makes
# the name an unknown font identifier; replaced with straight quotes.
css = """
@import url('https://fonts.googleapis.com/css2?family=Playpen+Sans+Hebrew:wght@100;200;300;400;500;600;700;800&display=swap');
body {
background-color: #f4e1c1 !important;
font-family:'Playpen Sans Hebrew', 'Georgia', serif !important;
color: #000 !important;
}
html, .gradio-container, .dark, .dark * {
background: #fffaf1 !important;
color: #000 !important;
}
.scroll_lists::-webkit-scrollbar {
width: 16px;
}
.scroll_lists::-webkit-scrollbar-track {
background-color: #e4e4e4;
border-radius: 100px;
}
.scroll_lists::-webkit-scrollbar-thumb {
background-color: #f4aa90;
border-radius: 100px;
}
#col-container {
background: #fffaf1;
padding: 20px;
border-radius: 16px;
box-shadow: 0 4px 12px rgba(0,0,0,0.1);
margin: auto;
max-width: 780px;
}
.gr-button {
background-color: #d4a373 !important;
color: white !important;
border-radius: 8px !important;
padding: 10px 24px !important;
font-weight: bold;
transition: background-color 0.3s;
}
.gr-button:hover {
background-color: #c48f61 !important;
}
.gr-textbox, .gr-slider, .gr-radio, .gr-checkbox, .gr-image {
background: #fff;
border-radius: 8px;
}
.gr-gallery {
background: #fffaf1;
padding: 10px;
border-radius: 12px;
}
.gr-gallery .gallery-item Figcaption,
.gr-gallery .gallery-item figcaption {
width:420px !important;
word-wrap:break-word !important;
display: none !important;
}
.caption.svelte-842rpi.svelte-842rpi{
display: none !important;
}
.gradio-spinner { display: none !important; }
#component-25, .gradio-container.gradio-container-5-25-2 .contain .image-frame.svelte-w225pd{
height: 50vw !important;
}
.image-container.svelte-w225pd.svelte-w225pd{
object-fit: fill !important;
}
#component-25 > div > img {
object-fit: fill !important;
}
#component-25 {
}
#component-25 .gr-image {
}
#component-25 .gr-image > div {
}
.image-frame.svelte-w225pd {
text-align:center;
}
.image-frame.svelte-w225pd img{
height: 100% !important;
display: block;
margin: auto;
object-fit: contain;
}
.block.svelte-11xb1hd {
background: #efd1bf !important;
}
span.svelte-g2oxp3, label.svelte-5ncdh7.svelte-5ncdh7.svelte-5ncdh7 {
color: #915325 !important;
}
.svelte-zyxd38 g {
display: none !important;
}
.secondary.svelte-1ixn6qd {
background: #dca08a !important;
color: #631c00 !important;
}
:root {
--color-accent: #a57659;
}
.max_value.svelte-10lj3xl.svelte-10lj3xl, span.min_value {
color: #a54618 !important;
}
@keyframes fadeLetter {
0%,100% { opacity: 1; }
50% { opacity: 0.2; }
}
.nobackground, .nobackground div, .nobackground.parent.parent.parent {
background-color: transparent !important;
}
progress::-webkit-progress-value {
background-color: #a57659 !important;
}
progress::-moz-progress-bar {
background-color: #a57659 !important;
}
.gradio-progress .progress-bar,
.gradio-progress-bar {
background-color: #a57659 !important;
}
#custom-loader {
align-items: center;
justify-content: center;
font-weight: bold;
position: absolute !important;
bottom: 40% !important;
left: 50% !important;
transform: translate(-50%, -50%) !important;
width: 100vw !important;
height: 100vh !important;
z-index: 9999 !important;
display: flex;
/* background-color: rgba(255, 250, 241, 0) !important;*/
}
#custom-loader, .loading-text {
width: auto !important;
height: auto !important;
}
#custom-loader .loading-text span {
display: inline-block;
animation: fadeLetter 1.8s ease-in-out infinite;
font-size:1.5em;
}
#custom-loader img {
width: 32px;
height: 32px;
border-radius: 50%;
margin-left: 8px;
display: inline-block;
animation: jump 2s infinite ease-in-out;
vertical-align: middle;
}
@keyframes jump {
0%, 100% { transform: translateY(10px); opacity: 1;}
50% { transform: translateY(-10px); opacity: 1;}
}
.grid-wrap.svelte-842rpi.svelte-842rpi{
overflow:auto !important;
}
#component-27{
overflow-y: scroll !important;
scrollbar-color: #915325 rgb(239, 209, 191);
}
/*.grid-container.svelte-842rpi{
display: flex;
flex-wrap: wrap;
}
.thumbnail-item.svelte-842rpi.svelte-842rpi{
width: 128px;
}*/
"""
with gr.Blocks(css=css, theme=gr.themes.Default(font=[gr.themes.GoogleFont("Playpen Sans Hebrew"), "sans-serif"])) as demo:
    with gr.Column(elem_id="col-container"):
        # NOTE(review): the original inline HTML tags were stripped in
        # transit; the heading/paragraph markup below is reconstructed —
        # confirm against the deployed Space.
        gr.HTML(
            '<h2>SDXL – Re:cocoamixXL3 (coamixXL3) Demo</h2>'
            '<p>The log is shared with other. (No more than 10 images will be displayed in history.)</p>'
            '<p>Please use this model at your own risk. I am not responsible in any way for any problems with the generated images.</p>'
        )
        gr.HTML('')
        gr.HTML('Not create NSFW at use this model.')
        with gr.Row():
            prompt = gr.Textbox(lines=1, placeholder="Prompt…", value="1girl, cocoart, masterpiece, anime, high quality,", label="Prompt")
            neg = gr.Textbox(lines=1, placeholder="Negative prompt", value="low quality, worst quality, bad, bad lighting, lowres, error, miss stroke, smoke, ugly, extra digits, creepy, imprecise, blurry,", label="Negative prompt")
        with gr.Row():
            seed_sl = gr.Slider(0, MAX_SEED, step=1, value=0, label="Seed")
            rand = gr.Checkbox(True, label="Randomize seed")
        with gr.Row():
            width = gr.Slider(256, 512, step=32, value=512, label="Width")
            height = gr.Slider(256, 768, step=32, value=512, label="Height")
        with gr.Row():
            cfg = gr.Slider(1.0, 30.0, step=0.5, value=6, label="CFG Scale")
            steps = gr.Slider(1, 12, step=1, value=12, label="Steps")
        with gr.Row():
            scheduler_type = gr.Radio(choices=["Euler Ancestral", "DPM++ 2M SDE"], value="Euler Ancestral", label="Scheduler")
        run = gr.Button("Generate")
        # Custom loader overlay (styled by the #custom-loader CSS rules).
        # NOTE(review): the loader's inner HTML was lost in transit; restore
        # the #custom-loader markup here if the animation should be visible.
        gr.HTML(
            """
            """
        )
        img_out = gr.Image(
            interactive=None,
            value=create_dummy_image(width=512, height=512, alpha=0),
            label="Generate Image"
        )
        state = gr.State([])
        history_gallery = gr.Gallery(
            label="History(max10)",
            columns=4,
            height=280,
            show_label=False,
            interactive=None,
            type="auto",
            value=[]
        )
        # Raw-HTML area that shows the caption table of the selected entry.
        history_tables = gr.HTML(value="")
        run.click(
            fn=infer,
            inputs=[prompt, neg, seed_sl, rand, width, height, cfg, steps, scheduler_type],
            outputs=[img_out, history_gallery, history_tables]
        )
        history_gallery.select(
            fn=update_history_tables_on_select,
            inputs=None,
            outputs=[history_tables, img_out]
        )
        # On page load, populate the gallery and caption table from `history`.
        # Each history entry is (private repo path, caption, public image URL).
        demo.load(
            fn=lambda: (
                [(item[2], item[1].replace("|-|", "\n")) for item in history if item[2] is not None],
                make_html_table(history[0][1]) if history else ""  # first item's caption
            ),
            inputs=[],
            outputs=[history_gallery, history_tables]
        )
demo.queue()
demo.launch()