Spaces:
Running
on
Zero
Commit
·
cf0a3d5
1 Parent(s): d761edd
update app
Browse files
- app.py +1 -2
- src/attention_processor.py +1 -2
app.py
CHANGED
|
@@ -87,7 +87,7 @@ def get_samples():
|
|
| 87 |
"image": "assets/2.jpg",
|
| 88 |
"scale": 0.6,
|
| 89 |
"seed": 42,
|
| 90 |
-
"text": "A photo of a …",
|
| 91 |
},
|
| 92 |
{
|
| 93 |
"image": "assets/character1.jpg",
|
|
@@ -169,7 +169,6 @@ def create_app():
|
|
| 169 |
text_input = gr.Textbox(
|
| 170 |
lines=2,
|
| 171 |
label="Text prompt",
|
| 172 |
-
value="A photo of a dragon, imaginative, creative, design",
|
| 173 |
elem_id="text"
|
| 174 |
)
|
| 175 |
|
|
|
|
| 87 |
"image": "assets/2.jpg",
|
| 88 |
"scale": 0.6,
|
| 89 |
"seed": 42,
|
| 90 |
+
"text": "A photo of a monster cartoon character, imaginative, creative, design",
|
| 91 |
},
|
| 92 |
{
|
| 93 |
"image": "assets/character1.jpg",
|
|
|
|
| 169 |
text_input = gr.Textbox(
|
| 170 |
lines=2,
|
| 171 |
label="Text prompt",
|
|
|
|
| 172 |
elem_id="text"
|
| 173 |
)
|
| 174 |
|
src/attention_processor.py
CHANGED
|
@@ -7,7 +7,7 @@ from typing import Callable, List, Optional, Tuple, Union
|
|
| 7 |
class FluxBlendedAttnProcessor2_0(nn.Module):
|
| 8 |
"""Attention processor used typically in processing the SD3-like self-attention projections."""
|
| 9 |
|
| 10 |
-
def __init__(self, hidden_dim, ba_scale=1.0, num_ref=1, temperature=1.0):
|
| 11 |
super().__init__()
|
| 12 |
if not hasattr(F, "scaled_dot_product_attention"):
|
| 13 |
raise ImportError("FluxBlendedAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
|
|
@@ -16,7 +16,6 @@ class FluxBlendedAttnProcessor2_0(nn.Module):
|
|
| 16 |
self.blended_attention_v_proj = nn.Linear(hidden_dim, hidden_dim, bias=False)
|
| 17 |
self.ba_scale = ba_scale
|
| 18 |
self.num_ref = num_ref
|
| 19 |
-
self.temperature = temperature # this is used only when num_ref > 1
|
| 20 |
|
| 21 |
def __call__(
|
| 22 |
self,
|
|
|
|
| 7 |
class FluxBlendedAttnProcessor2_0(nn.Module):
|
| 8 |
"""Attention processor used typically in processing the SD3-like self-attention projections."""
|
| 9 |
|
| 10 |
+
def __init__(self, hidden_dim, ba_scale=1.0, num_ref=1):
|
| 11 |
super().__init__()
|
| 12 |
if not hasattr(F, "scaled_dot_product_attention"):
|
| 13 |
raise ImportError("FluxBlendedAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
|
|
|
|
| 16 |
self.blended_attention_v_proj = nn.Linear(hidden_dim, hidden_dim, bias=False)
|
| 17 |
self.ba_scale = ba_scale
|
| 18 |
self.num_ref = num_ref
|
|
|
|
| 19 |
|
| 20 |
def __call__(
|
| 21 |
self,
|