# aixk_custom_model-gguf / modeling_aixk.py
import math

import torch
import torch.nn as nn


class AIXKNovelAttention(nn.Module):
    """Multi-head self-attention augmented with a depthwise-convolutional local-context branch."""

    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.q_proj = nn.Linear(self.hidden_size, self.hidden_size)
        self.k_proj = nn.Linear(self.hidden_size, self.hidden_size)
        self.v_proj = nn.Linear(self.hidden_size, self.hidden_size)
        # Depthwise 1D convolution over the sequence dimension; kernel_size=3 with
        # padding=1 lets each position also see one future token (non-causal).
        self.local_context = nn.Conv1d(self.hidden_size, self.hidden_size, kernel_size=3, padding=1, groups=self.hidden_size)
        self.out_proj = nn.Linear(self.hidden_size, self.hidden_size)
    def forward(self, hidden_states, attention_mask=None):
        batch_size, seq_len, _ = hidden_states.size()
        # Mix in local context; Conv1d expects (batch, channels, seq), hence the transposes.
        local_feat = self.local_context(hidden_states.transpose(1, 2)).transpose(1, 2)
        hidden_states = hidden_states + local_feat
        # Project and reshape to (batch, num_heads, seq_len, head_dim).
        q = self.q_proj(hidden_states).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
        k = self.k_proj(hidden_states).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
        v = self.v_proj(hidden_states).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
        # Scaled dot-product attention; attention_mask is expected to be an additive float
        # mask broadcastable to (batch, num_heads, seq_len, seq_len).
        attn_weights = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(self.head_dim)
        if attention_mask is not None:
            attn_weights = attn_weights + attention_mask
        attn_probs = nn.functional.softmax(attn_weights, dim=-1)
        attn_output = torch.matmul(attn_probs, v)
        # Merge heads back to (batch, seq_len, hidden_size) and project out.
        return self.out_proj(attn_output.transpose(1, 2).contiguous().view(batch_size, seq_len, self.hidden_size))

class SentenceSegmentationLayer(nn.Module):
    """Gates each token's hidden state by a learned boundary score in (0, 1)."""

    def __init__(self, config):
        super().__init__()
        self.boundary_detector = nn.Sequential(
            nn.Linear(config.hidden_size, config.hidden_size),
            nn.Tanh(),
            nn.Linear(config.hidden_size, 1),
        )
        self.gate = nn.Sigmoid()

    def forward(self, hidden_states):
        # The per-token boundary logit is squashed to (0, 1) and broadcast over hidden_size.
        return hidden_states * self.gate(self.boundary_detector(hidden_states))

class AIXKCustomModelFixed(nn.Module):
    """Decoder-only language model stacking attention, segmentation gating, and MLP blocks."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.layers = nn.ModuleList([
            nn.ModuleDict({
                'attn': AIXKNovelAttention(config),
                'seg': SentenceSegmentationLayer(config),
                'mlp': nn.Sequential(
                    nn.Linear(config.hidden_size, config.intermediate_size),
                    nn.GELU(),
                    nn.Linear(config.intermediate_size, config.hidden_size),
                ),
                'norm1': nn.LayerNorm(config.hidden_size),
                'norm2': nn.LayerNorm(config.hidden_size),
            }) for _ in range(config.num_hidden_layers)
        ])
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
    def forward(self, input_ids, attention_mask=None):
        x = self.embeddings(input_ids)
        if attention_mask is None:
            # Build an additive causal mask: 0 on and below the diagonal, -inf above it.
            # (Calling masked_fill on the boolean triu tensor itself would keep the result
            # boolean, so the -inf values are written into a float tensor instead.)
            seq_len = input_ids.shape[1]
            causal = torch.triu(torch.ones(seq_len, seq_len, device=input_ids.device, dtype=torch.bool), diagonal=1)
            attention_mask = torch.zeros(seq_len, seq_len, device=input_ids.device).masked_fill(causal, float('-inf'))
        for layer in self.layers:
            # Pre-norm residual attention, segmentation gating, then pre-norm residual MLP.
            x = x + layer['attn'](layer['norm1'](x), attention_mask)
            x = layer['seg'](x)
            x = x + layer['mlp'](layer['norm2'](x))
        return self.lm_head(x)
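

# A minimal smoke-test sketch (not part of the original upload): the config fields below mirror
# the attributes this module actually reads (vocab_size, hidden_size, num_attention_heads,
# intermediate_size, num_hidden_layers); the concrete values are illustrative assumptions only.
if __name__ == "__main__":
    from types import SimpleNamespace

    config = SimpleNamespace(
        vocab_size=32000,         # assumed vocabulary size, for illustration
        hidden_size=256,
        num_attention_heads=4,
        intermediate_size=1024,
        num_hidden_layers=2,
    )
    model = AIXKCustomModelFixed(config)
    input_ids = torch.randint(0, config.vocab_size, (1, 16))
    logits = model(input_ids)     # causal mask is built internally when none is passed
    print(logits.shape)           # expected: torch.Size([1, 16, 32000])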