from transformers import PretrainedConfig, PreTrainedModel, AutoModelForCausalLM
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from transformers.modeling_outputs import CausalLMOutputWithPast


class CrossExpertAttention(nn.Module):
    """Multi-head attention applied across the expert dimension of the mesh,
    letting each expert's output attend to the outputs of all other experts."""

    def __init__(self, config: MeshConfig):
        super().__init__()
        self.config = config

        # Standard multi-head attention; the "sequence" it attends over is the
        # set of experts in the mesh grid, not the token sequence.
        self.cross_attention = nn.MultiheadAttention(
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            batch_first=True
        )

    def forward(self, expert_outputs):
        # expert_outputs: (batch, seq_len, num_experts, hidden_size), where
        # num_experts = mesh_grid_size[0] * mesh_grid_size[1].
        if not self.config.cross_expert_attention_enabled:
            return expert_outputs

        # Fold batch and sequence dimensions together so attention runs over
        # the expert dimension independently for every (batch, position) pair.
        batch_size, seq_len = expert_outputs.shape[0], expert_outputs.shape[1]
        num_experts = self.config.mesh_grid_size[0] * self.config.mesh_grid_size[1]
        reshaped_outputs = expert_outputs.view(batch_size * seq_len, num_experts, self.config.hidden_size)

        # Self-attention across experts (query = key = value = expert outputs).
        cross_attn_output, _ = self.cross_attention(reshaped_outputs, reshaped_outputs, reshaped_outputs)

        # Restore the original (batch, seq_len, num_experts, hidden_size) layout.
        cross_attn_output = cross_attn_output.view(
            batch_size, seq_len, num_experts, self.config.hidden_size
        )

        return cross_attn_output
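

# Minimal usage sketch (illustrative only). MeshConfig is not shown in this
# excerpt, so a SimpleNamespace stands in for it, exposing just the attributes
# CrossExpertAttention actually reads: hidden_size, num_attention_heads,
# cross_expert_attention_enabled, and mesh_grid_size. The tensor shapes below
# are assumptions chosen to match the forward pass.
if __name__ == "__main__":
    from types import SimpleNamespace

    demo_config = SimpleNamespace(
        hidden_size=64,
        num_attention_heads=4,
        cross_expert_attention_enabled=True,
        mesh_grid_size=(2, 2),  # 2x2 mesh -> 4 experts
    )

    layer = CrossExpertAttention(demo_config)
    expert_outputs = torch.randn(2, 3, 4, 64)  # (batch, seq_len, num_experts, hidden)
    out = layer(expert_outputs)
    print(out.shape)  # expected: torch.Size([2, 3, 4, 64])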