mcuo committed on
Commit
cf651d1
·
verified ·
1 Parent(s): 890fc2e

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -31
app.py CHANGED
@@ -30,19 +30,13 @@ compel = Compel(
30
  text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
31
  returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
32
  requires_pooled=[False, True],
33
- truncate_long_prompts=False
34
  )
35
 
36
  MAX_SEED = np.iinfo(np.int32).max
37
  MAX_IMAGE_SIZE = 1216
38
 
39
- def process_long_prompt(prompt, negative_prompt=""):
40
- try:
41
- conditioning, pooled = compel([prompt, negative_prompt])
42
- return conditioning, pooled
43
- except Exception as e:
44
- print(f"Long prompt processing failed: {e}, falling back to standard processing")
45
- return None, None
46
 
47
  @spaces.GPU(duration=10)
48
  def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
@@ -54,28 +48,38 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
54
  generator = torch.Generator(device=device).manual_seed(seed)
55
 
56
  try:
57
- # 常にcompelを使って長いプロンプトに対応する
58
- conditioning, pooled = process_long_prompt(prompt, negative_prompt)
59
-
60
- # process_long_promptが成功したかチェック
61
- if conditioning is not None and pooled is not None:
62
- # compelで生成した埋め込みを使用して画像を生成
63
- return pipe(
64
- prompt_embeds=conditioning[0:1], pooled_prompt_embeds=pooled[0:1],
65
- negative_prompt_embeds=conditioning[1:2], negative_pooled_prompt_embeds=pooled[1:2],
66
- guidance_scale=guidance_scale, num_inference_steps=num_inference_steps,
67
- width=width, height=height, generator=generator
68
- ).images[0]
69
- else:
70
- # compelの処理に失敗した場合は、標準のパイプラインにフォールバック
71
- return pipe(
72
- prompt=prompt, negative_prompt=negative_prompt, guidance_scale=guidance_scale,
73
- num_inference_steps=num_inference_steps, width=width, height=height, generator=generator
74
- ).images[0]
 
 
 
 
 
 
 
75
  except RuntimeError as e:
 
76
  print(f"Error during generation: {e}")
 
77
  return Image.new('RGB', (width, height), color=(0, 0, 0))
78
 
 
79
  css = """
80
  #col-container {
81
  margin: 0 auto;
@@ -104,7 +108,6 @@ with gr.Blocks(css=css) as demo:
104
  negative_prompt = gr.Text(
105
  label="Negative prompt", max_lines=1, placeholder="Enter a negative prompt",
106
  value="bad quality, low quality, worst quality, worst detail, 3d, sketch, censorship"
107
- # value="bad quality, worst quality, worst detail, 3d, sketch, cencer"
108
  )
109
  seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
110
  randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
@@ -118,7 +121,7 @@ with gr.Blocks(css=css) as demo:
118
  gr.Examples(
119
  examples=[
120
  ["masterpiece, best quality, amazing quality, little girl"],
121
- ["masterpiece, best quality, amazing quality, little 1girl with blonde short side tails, perfectly round red eyes, "],
122
  ],
123
  inputs=[prompt],
124
  label="Examples (Click to copy to prompt)"
@@ -138,7 +141,6 @@ with gr.Blocks(css=css) as demo:
138
  is_prompt_valid = current_prompt.strip().lower().startswith("masterpiece")
139
  return False, gr.update(value="Lock"), gr.update(interactive=is_prompt_valid)
140
 
141
- # ▼▼▼ 変更 ▼▼▼
142
  # Trashボタンが押されたときの関数
143
  def clear_and_unlock():
144
  # プロンプトを空に、Generateを無効化、Lockボタンをリセット、ロック状態を解除
@@ -164,7 +166,6 @@ with gr.Blocks(css=css) as demo:
164
  inputs=None,
165
  outputs=[prompt, run_button, lock_button, lock_state]
166
  )
167
- # ▲▲▲ 変更 ▲▲▲
168
 
169
  run_button.click(
170
  fn=infer,
@@ -172,4 +173,4 @@ with gr.Blocks(css=css) as demo:
172
  outputs=[result]
173
  )
174
 
175
- demo.queue().launch()
 
30
  text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
31
  returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
32
  requires_pooled=[False, True],
33
+ truncate_long_prompts=False # これがTrueだと長いプロンプトが切り捨てられるためFalseで正しい
34
  )
35
 
36
  MAX_SEED = np.iinfo(np.int32).max
37
  MAX_IMAGE_SIZE = 1216
38
 
39
+ # process_long_prompt 関数は不要なので削除します
 
 
 
 
 
 
40
 
41
  @spaces.GPU(duration=10)
42
  def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
 
48
  generator = torch.Generator(device=device).manual_seed(seed)
49
 
50
  try:
51
+ # ▼▼▼▼▼ 変更箇所 ▼▼▼▼▼
52
+ # 常にcompelを使ってプロンプトとネガティブプロンプトを別々に処理する
53
+ # これにより、フォールバックロジックが不要になり、エラーの原因が取り除かれる
54
+
55
+ # ポジティブプロンプトの埋め込みを生成
56
+ prompt_embeds, pooled_prompt_embeds = compel(prompt)
57
+
58
+ # ネガティブプロンプトの埋め込みを生成
59
+ negative_prompt_embeds, negative_pooled_prompt_embeds = compel(negative_prompt)
60
+
61
+ # compelで生成した埋め込みを使用して画像を生成
62
+ image = pipe(
63
+ prompt_embeds=prompt_embeds,
64
+ pooled_prompt_embeds=pooled_prompt_embeds,
65
+ negative_prompt_embeds=negative_prompt_embeds,
66
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
67
+ guidance_scale=guidance_scale,
68
+ num_inference_steps=num_inference_steps,
69
+ width=width,
70
+ height=height,
71
+ generator=generator
72
+ ).images[0]
73
+ return image
74
+ # ▲▲▲▲▲ 変更箇所 ▲▲▲▲▲
75
+
76
  except RuntimeError as e:
77
+ # GPUメモリ不足などの実行時エラーを捕捉
78
  print(f"Error during generation: {e}")
79
+ # エラーが発生した場合は、黒い画像を返す
80
  return Image.new('RGB', (width, height), color=(0, 0, 0))
81
 
82
+
83
  css = """
84
  #col-container {
85
  margin: 0 auto;
 
108
  negative_prompt = gr.Text(
109
  label="Negative prompt", max_lines=1, placeholder="Enter a negative prompt",
110
  value="bad quality, low quality, worst quality, worst detail, 3d, sketch, censorship"
 
111
  )
112
  seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
113
  randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
 
121
  gr.Examples(
122
  examples=[
123
  ["masterpiece, best quality, amazing quality, little girl"],
124
+ ["masterpiece, best quality, amazing quality, little 1girl with blonde short side tails, perfectly round red eyes, wearing a detailed fantasy adventurer outfit, exploring a mysterious ancient forest filled with glowing mushrooms and magical creatures"],
125
  ],
126
  inputs=[prompt],
127
  label="Examples (Click to copy to prompt)"
 
141
  is_prompt_valid = current_prompt.strip().lower().startswith("masterpiece")
142
  return False, gr.update(value="Lock"), gr.update(interactive=is_prompt_valid)
143
 
 
144
  # Trashボタンが押されたときの関数
145
  def clear_and_unlock():
146
  # プロンプトを空に、Generateを無効化、Lockボタンをリセット、ロック状態を解除
 
166
  inputs=None,
167
  outputs=[prompt, run_button, lock_button, lock_state]
168
  )
 
169
 
170
  run_button.click(
171
  fn=infer,
 
173
  outputs=[result]
174
  )
175
 
176
+ demo.queue().launch()