"""PyTorch Qwen3 model.""" |
|
|
|
|
|
from typing import Callable, Optional, Tuple |
|
|
|
|
|
import torch |
|
|
import torch.utils.checkpoint |
|
|
|
|
|
from transformers.cache_utils import Cache |
|
|
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs |
|
|
from transformers.modeling_outputs import CausalLMOutputWithPast |
|
|
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS |
|
|
from transformers.processing_utils import Unpack |
|
|
from transformers.utils import LossKwargs, logging |
|
|
from ..gemma.modeling_gemma import GemmaMLP |
|
|
from ..llama.modeling_llama import ( |
|
|
LlamaAttention, |
|
|
LlamaDecoderLayer, |
|
|
LlamaForCausalLM, |
|
|
LlamaForQuestionAnswering, |
|
|
LlamaForSequenceClassification, |
|
|
LlamaForTokenClassification, |
|
|
LlamaRMSNorm, |
|
|
apply_rotary_pos_emb, |
|
|
eager_attention_forward, |
|
|
) |
|
|
from ..mistral.modeling_mistral import MistralModel |
|
|
from .configuration_qwen3 import Qwen3Config |
|
|
|
|
|
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
|
|
_CHECKPOINT_FOR_DOC = "Qwen/Qwen3-8B" |
|
|
|
|
|
|


class Qwen3RMSNorm(LlamaRMSNorm):
    pass


class Qwen3MLP(GemmaMLP):
    pass


class Qwen3Attention(LlamaAttention):
    def __init__(self, config: Qwen3Config, layer_idx: int):
        super().__init__(config, layer_idx)
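        # Unlike Llama, Qwen3 RMS-normalizes the query and key states per attention head
        # (over head_dim) before rotary embeddings are applied.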
        self.q_norm = Qwen3RMSNorm(self.head_dim, eps=config.rms_norm_eps)
        self.k_norm = Qwen3RMSNorm(self.head_dim, eps=config.rms_norm_eps)
        self.sliding_window = config.sliding_window
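        # Disable the sliding window unless the config enables it and this layer's index
        # is at or beyond max_window_layers.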
        if not (
            self.config.use_sliding_window
            and getattr(self.config, "sliding_window", None) is not None
            and self.layer_idx >= self.config.max_window_layers
        ):
            self.sliding_window = None

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
        key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
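            # sin and cos are specific to RoPE models; cache_position is needed for the static cache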
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
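
        # Use eager attention by default; otherwise dispatch to the implementation configured on the
        # model (sdpa falls back to eager when attention weights are requested).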
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class Qwen3DecoderLayer(LlamaDecoderLayer):
    def __init__(self, config: Qwen3Config, layer_idx: int):
        super().__init__(config, layer_idx)
        self.self_attn = Qwen3Attention(config=config, layer_idx=layer_idx)
        self.mlp = Qwen3MLP(config)
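        # Identical to LlamaDecoderLayer apart from the Qwen3 submodules above and this warning.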
        if config.sliding_window and config._attn_implementation != "flash_attention_2":
            logger.warning_once(
                f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
                "unexpected results may be encountered."
            )


class Qwen3Model(MistralModel):
    pass
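

# Typed keyword arguments accepted by Qwen3ForCausalLM.forward: flash-attention
# kwargs combined with loss-computation kwargs.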
class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


class Qwen3ForCausalLM(LlamaForCausalLM):
    def forward(
        self,
        **super_kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Qwen3ForCausalLM

        >>> model = Qwen3ForCausalLM.from_pretrained("Qwen/Qwen3-8B")
        >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        return super().forward(**super_kwargs)


class Qwen3ForSequenceClassification(LlamaForSequenceClassification):
    pass


class Qwen3ForTokenClassification(LlamaForTokenClassification):
    pass


class Qwen3ForQuestionAnswering(LlamaForQuestionAnswering):
    pass


__all__ = [
    "Qwen3ForCausalLM",
    "Qwen3ForQuestionAnswering",
    "Qwen3Model",
    "Qwen3PreTrainedModel",
    "Qwen3ForSequenceClassification",
    "Qwen3ForTokenClassification",
]