prithivMLmods committed
Commit deb831f · verified · 1 Parent(s): 8b08c36

Delete qwenimage

qwenimage/__init__.py DELETED
File without changes
qwenimage/qwen_fa3_processor.py DELETED
@@ -1,142 +0,0 @@
- """
- Paired with a good language model. Thanks!
- """
-
- import torch
- from typing import Optional, Tuple
- from diffusers.models.transformers.transformer_qwenimage import apply_rotary_emb_qwen
-
- try:
-     from kernels import get_kernel
-     _k = get_kernel("kernels-community/vllm-flash-attn3")
-     _flash_attn_func = _k.flash_attn_func
- except Exception as e:
-     _flash_attn_func = None
-     _kernels_err = e
-
-
- def _ensure_fa3_available():
-     if _flash_attn_func is None:
-         raise ImportError(
-             "FlashAttention-3 via Hugging Face `kernels` is required. "
-             "Tried `get_kernel('kernels-community/vllm-flash-attn3')` and failed with:\n"
-             f"{_kernels_err}"
-         )
-
-
- @torch.library.custom_op("flash::flash_attn_func", mutates_args=())
- def flash_attn_func(
-     q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, causal: bool = False
- ) -> torch.Tensor:
-     # The FA3 kernel returns (output, softmax_lse); only the output is exposed.
-     outputs, lse = _flash_attn_func(q, k, v, causal=causal)
-     return outputs
-
-
- @flash_attn_func.register_fake
- def _(q, k, v, **kwargs):
-     # Meta implementation for tracing/compilation. The real kernel returns:
-     #   1. output: (batch, seq_len, num_heads, head_dim)
-     #   2. softmax_lse: (batch, num_heads, seq_len), dtype=torch.float32
-     # Only the attention output is surfaced by this custom op.
-     meta_q = torch.empty_like(q).contiguous()
-     return meta_q  # , q.new_empty((q.size(0), q.size(2), q.size(1)), dtype=torch.float32)
-
-
- class QwenDoubleStreamAttnProcessorFA3:
-     """
-     FA3-based attention processor for the Qwen double-stream architecture.
-     Computes joint attention over the concatenated [text, image] streams using
-     vLLM FlashAttention-3, accessed via Hugging Face `kernels`.
-
-     Notes / limitations:
-     - General attention masks are not supported on this FA3 path: `is_causal=False`
-       and no arbitrary mask.
-     - Optional windowed attention / sink tokens / softcap can be plumbed through
-       if you use those features.
-     - Expects `apply_rotary_emb_qwen` to be in scope (same as the non-FA3 processor).
-     """
-
-     _attention_backend = "fa3"  # for parity with the other processors; not used internally
-
-     def __init__(self):
-         _ensure_fa3_available()
-
-     @torch.no_grad()
-     def __call__(
-         self,
-         attn,  # Attention module with to_q/to_k/to_v/add_*_proj, norms, to_out, to_add_out, and .heads
-         hidden_states: torch.FloatTensor,  # (B, S_img, D_model) image stream
-         encoder_hidden_states: torch.FloatTensor = None,  # (B, S_txt, D_model) text stream
-         encoder_hidden_states_mask: torch.FloatTensor = None,  # unused in FA3 path
-         attention_mask: Optional[torch.FloatTensor] = None,  # unused in FA3 path
-         image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # (img_freqs, txt_freqs)
-     ) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
-         if encoder_hidden_states is None:
-             raise ValueError("QwenDoubleStreamAttnProcessorFA3 requires encoder_hidden_states (text stream).")
-         if attention_mask is not None:
-             # This FA3 path does not consume arbitrary masks; fail fast to avoid silent correctness issues.
-             raise NotImplementedError("attention_mask is not supported in this FA3 implementation.")
-
-         _ensure_fa3_available()
-
-         B, S_img, _ = hidden_states.shape
-         S_txt = encoder_hidden_states.shape[1]
-
-         # ---- QKV projections (image/sample stream) ----
-         img_q = attn.to_q(hidden_states)  # (B, S_img, D)
-         img_k = attn.to_k(hidden_states)
-         img_v = attn.to_v(hidden_states)
-
-         # ---- QKV projections (text/context stream) ----
-         txt_q = attn.add_q_proj(encoder_hidden_states)  # (B, S_txt, D)
-         txt_k = attn.add_k_proj(encoder_hidden_states)
-         txt_v = attn.add_v_proj(encoder_hidden_states)
-
-         # ---- Reshape to (B, S, H, D_h) ----
-         H = attn.heads
-         img_q = img_q.unflatten(-1, (H, -1))
-         img_k = img_k.unflatten(-1, (H, -1))
-         img_v = img_v.unflatten(-1, (H, -1))
-
-         txt_q = txt_q.unflatten(-1, (H, -1))
-         txt_k = txt_k.unflatten(-1, (H, -1))
-         txt_v = txt_v.unflatten(-1, (H, -1))
-
-         # ---- Q/K normalization (per the module contract) ----
-         if getattr(attn, "norm_q", None) is not None:
-             img_q = attn.norm_q(img_q)
-         if getattr(attn, "norm_k", None) is not None:
-             img_k = attn.norm_k(img_k)
-         if getattr(attn, "norm_added_q", None) is not None:
-             txt_q = attn.norm_added_q(txt_q)
-         if getattr(attn, "norm_added_k", None) is not None:
-             txt_k = attn.norm_added_k(txt_k)
-
-         # ---- RoPE (Qwen variant) ----
-         if image_rotary_emb is not None:
-             img_freqs, txt_freqs = image_rotary_emb
-             # expects tensors shaped (B, S, H, D_h)
-             img_q = apply_rotary_emb_qwen(img_q, img_freqs, use_real=False)
-             img_k = apply_rotary_emb_qwen(img_k, img_freqs, use_real=False)
-             txt_q = apply_rotary_emb_qwen(txt_q, txt_freqs, use_real=False)
-             txt_k = apply_rotary_emb_qwen(txt_k, txt_freqs, use_real=False)
-
-         # ---- Joint attention over [text, image] along the sequence axis ----
-         # Shapes: (B, S_total, H, D_h)
-         q = torch.cat([txt_q, img_q], dim=1)
-         k = torch.cat([txt_k, img_k], dim=1)
-         v = torch.cat([txt_v, img_v], dim=1)
-
-         # FlashAttention-3 expects (B, S, H, D_h) and returns (out, softmax_lse)
-         out = flash_attn_func(q, k, v, causal=False)  # out: (B, S_total, H, D_h)
-
-         # ---- Back to (B, S, D_model) ----
-         out = out.flatten(2, 3).to(q.dtype)
-
-         # Split back into text / image segments
-         txt_attn_out = out[:, :S_txt, :]
-         img_attn_out = out[:, S_txt:, :]
-
-         # ---- Output projections ----
-         img_attn_out = attn.to_out[0](img_attn_out)
-         if len(attn.to_out) > 1:
-             img_attn_out = attn.to_out[1](img_attn_out)  # dropout if present
-
-         txt_attn_out = attn.to_add_out(txt_attn_out)
-
-         return img_attn_out, txt_attn_out
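
For context, a minimal sketch of how the deleted processor would have been wired into a pipeline. This is an illustration under assumptions, not part of the commit: it assumes the diffusers Qwen-Image pipeline ("Qwen/Qwen-Image") and that its transformer exposes the standard diffusers set_attn_processor API; the dtype, prompt, and step count are placeholders.

import torch
from diffusers import DiffusionPipeline
from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3  # the file deleted above

# Assumed model id and dtype; adjust to your setup.
pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image", torch_dtype=torch.bfloat16)
pipe.to("cuda")

# Swap the transformer's attention processors for the FA3 variant
# (assumes the standard diffusers set_attn_processor contract).
pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())

image = pipe("a city street at dusk", num_inference_steps=28).images[0]
image.save("out.png")

Because the processor raises ImportError at construction when the kernels package cannot load FlashAttention-3, the swap fails fast on unsupported hardware rather than silently degrading.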