cocoat committed on
Commit
78d5b24
·
verified ·
1 Parent(s): 51c11b6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -81
app.py CHANGED
@@ -1,94 +1,44 @@
1
- import gradio as gr
2
  import torch
3
- import random
4
- import numpy as np
5
- from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
6
  from huggingface_hub import hf_hub_download
 
7
 
8
- # --- 設定 ---
9
- device = "cuda" if torch.cuda.is_available() else "cpu"
10
- torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
11
- MAX_SEED = np.iinfo(np.int32).max
12
- MAX_IMAGE_SIZE = 2048
13
-
14
- # --- safetensors ファイルの正しい取得 ---
15
  model_path = hf_hub_download(
16
  repo_id="cocoat/cocoamix",
17
  filename="recocoamixXL3_coamixXL3.safetensors"
18
  )
19
 
20
- # --- パイプライン読み込み ---
21
- pipe = StableDiffusionXLPipeline.from_single_file(
22
- "https://huggingface.co/cocoat/cocoamix/resolve/main/recocoamixXL3_coamixXL3.safetensors",
23
- torch_dtype=torch_dtype,
24
- use_safetensors=True
25
- ).to(device)
26
-
27
- pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(
28
- pipe.scheduler.config,
29
- use_karras_sigmas=True
30
- )
31
-
32
- # 生成履歴
33
- history = []
34
-
35
- def infer(
36
- prompt, negative_prompt, seed, randomize_seed,
37
- width, height, cfg_scale, num_inference_steps,
38
- progress=gr.Progress(track_tqdm=True)
39
- ):
40
- if randomize_seed:
41
- seed = random.randint(0, MAX_SEED)
42
- gen = torch.Generator(device=device).manual_seed(seed)
43
- pipe.scheduler.set_timesteps(num_inference_steps)
44
 
45
- def _callback(pipeline, step_idx, timestep, callback_kwargs):
46
- progress(step_idx / num_inference_steps,
47
- desc=f"Step {step_idx}/{num_inference_steps}")
48
- return callback_kwargs
49
-
50
- out = pipe(
51
  prompt=prompt,
52
- negative_prompt=negative_prompt or None,
 
53
  guidance_scale=cfg_scale,
54
- num_inference_steps=num_inference_steps,
55
- width=width, height=height,
56
- generator=gen,
57
- callback_on_step_end=_callback
58
- )
59
- img = out.images[0]
60
- history.insert(0, img)
61
- progress(1.0, desc="Done!")
62
- return img, seed, history
63
-
64
- css = "#col-container{margin:auto; max-width:720px}"
65
- examples = [["1girl, cocoart, masterpiece", "", 0, True, 1024, 1024, 5, 23]]
66
-
67
- with gr.Blocks(css=css) as demo:
68
- with gr.Column(elem_id="col-container"):
69
- gr.Markdown("## recocoamixXL3 demo")
70
- prompt = gr.Textbox(lines=1, placeholder="Prompt…")
71
- run = gr.Button("Generate")
72
- img_out = gr.Image()
73
- seed_out = gr.Textbox(label="Seed", interactive=False)
74
- history_gallery = gr.Gallery(label="生成履歴", columns=4, height=200)
75
-
76
- with gr.Accordion("Advanced Settings", open=False):
77
- neg = gr.Textbox(lines=1, placeholder="Negative prompt")
78
- seed_sl = gr.Slider(0, MAX_SEED, step=1, value=0, label="Seed")
79
- rand = gr.Checkbox(True, label="Randomize seed")
80
- width = gr.Slider(256, MAX_IMAGE_SIZE, step=32, value=1024, label="Width")
81
- height = gr.Slider(256, MAX_IMAGE_SIZE, step=32, value=1024, label="Height")
82
- cfg = gr.Slider(1.0, 30.0, step=0.1, value=5, label="CFG Scale")
83
- steps = gr.Slider(1, 50, step=1, value=23, label="Steps")
84
-
85
- gr.Examples(examples, [prompt, neg, seed_sl, rand, width, height, cfg, steps])
86
-
87
- run.click(
88
- fn=infer,
89
- inputs=[prompt, neg, seed_sl, rand, width, height, cfg, steps],
90
- outputs=[img_out, seed_out, history_gallery]
91
- )
92
 
93
- demo.queue()
94
  demo.launch(share=True)
 
 
1
  import torch
2
+ from diffusers import StableDiffusionXLPipeline
 
 
3
  from huggingface_hub import hf_hub_download
4
+ import gradio as gr
5
 
6
# Download the model checkpoint from Hugging Face.
model_path = hf_hub_download(
    repo_id="cocoat/cocoamix",
    filename="recocoamixXL3_coamixXL3.safetensors"
)

# Load the pipeline from the single safetensors file.
# fp16 weights only make sense on CUDA; on CPU-only machines the original
# unconditional `pipe.to("cuda")` raised at startup, so detect the device
# and fall back to fp32 on CPU (slow, but it runs).
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32
pipe = StableDiffusionXLPipeline.from_single_file(model_path, torch_dtype=dtype)
pipe.to(device)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
# Generation function
def generate(prompt, negative_prompt, steps, cfg_scale, seed):
    """Run one text-to-image generation and return the resulting PIL image.

    Args:
        prompt: positive prompt text.
        negative_prompt: negative prompt text.
        steps: number of denoising steps (arrives as a float from gr.Slider).
        cfg_scale: classifier-free guidance scale.
        seed: RNG seed from gr.Number; None (empty field) means random.

    Returns:
        The first generated image (PIL.Image).
    """
    # Compare against None explicitly: the original truthiness test
    # (`if seed else None`) silently randomized seed == 0, contradicting
    # the UI label, which says only an *empty* field means random.
    if seed is None:
        generator = None
    else:
        generator = torch.manual_seed(int(seed))
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        # Gradio sliders deliver floats; diffusers expects an int step count.
        num_inference_steps=int(steps),
        guidance_scale=cfg_scale,
        generator=generator,
    ).images[0]
    return image
27
+
28
# Gradio interface: map the five controls straight onto generate().
ui_inputs = [
    gr.Textbox(label="Prompt"),
    gr.Textbox(label="Negative Prompt"),
    gr.Slider(minimum=10, maximum=50, value=30, label="Steps"),
    gr.Slider(minimum=1.0, maximum=15.0, value=7.5, label="CFG Scale"),
    gr.Number(value=42, label="Seed (空でランダム)"),
]

demo = gr.Interface(
    fn=generate,
    inputs=ui_inputs,
    outputs=gr.Image(type="pil"),
    title="recocoamixXL3_coamixXL3 Generator",
    description="Hugging Faceから直接読み込んだSDXLベースモデルで画像を生成します。",
)

# Public launch: share=True exposes a temporary public URL.
demo.launch(share=True)