SykoSLM committed on
Commit fa7ad3e · verified · 1 Parent(s): 5a737ea

Upload 2 files

Files changed (2)
  1. configuration_sykoomni.py +43 -0
  2. modeling_sykoomni.py +192 -0
configuration_sykoomni.py ADDED
@@ -0,0 +1,43 @@
from transformers import PretrainedConfig


class SykoConfig(PretrainedConfig):
    model_type = "sykoomni"

    def __init__(
        self,
        text_vocab_size=32000,
        image_vocab_size=8192,
        audio_vocab_size=1024,
        d_model=768,
        n_layers=24,
        n_heads=6,
        num_memory_tokens=16,
        num_global_memory_tokens=32,
        intermediate_size=3072,
        chunk_size=128,
        context_size=1024,
        causal_reasoning_layers=2,
        vision_hidden_size=768,
        audio_hidden_size=384,
        max_image_tokens=256,
        max_audio_tokens=500,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.text_vocab_size = text_vocab_size
        self.image_vocab_size = image_vocab_size
        self.audio_vocab_size = audio_vocab_size
        # Unified vocabulary: text + image + audio ranges, plus 10 special tokens.
        self.vocab_size = text_vocab_size + image_vocab_size + audio_vocab_size + 10
        self.d_model = d_model
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.num_memory_tokens = num_memory_tokens
        self.num_global_memory_tokens = num_global_memory_tokens
        self.intermediate_size = intermediate_size
        self.chunk_size = chunk_size
        self.context_size = context_size
        self.causal_reasoning_layers = causal_reasoning_layers
        self.vision_hidden_size = vision_hidden_size
        self.audio_hidden_size = audio_hidden_size
        self.max_image_tokens = max_image_tokens
        self.max_audio_tokens = max_audio_tokens
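A minimal usage sketch (hypothetical, not part of this upload), assuming the file is importable on its own; it instantiates the defaults and checks the combined vocabulary size:

from configuration_sykoomni import SykoConfig

config = SykoConfig()
# 32000 text + 8192 image + 1024 audio + 10 special tokens
assert config.vocab_size == 41226
print(config.d_model, config.n_layers, config.n_heads)  # 768 24 6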
modeling_sykoomni.py ADDED
@@ -0,0 +1,192 @@
import torch
import torch.nn as nn
from transformers import PreTrainedModel, SiglipVisionModel, WhisperModel
from .configuration_sykoomni import SykoConfig


class SykoMemoryGate(nn.Module):
    """GRU-style forget/update gate for the per-chunk memory tokens."""

    def __init__(self, d_model):
        super().__init__()
        self.forget_linear = nn.Linear(d_model * 2, d_model)
        self.update_linear = nn.Linear(d_model, d_model)
        self.norm = nn.LayerNorm(d_model)

    def forward(self, current_context, prev_memory):
        combined = torch.cat([current_context, prev_memory], dim=-1)
        forget_ratio = torch.sigmoid(self.forget_linear(combined))
        new_candidate = torch.tanh(self.update_linear(current_context))
        return self.norm((forget_ratio * prev_memory) + ((1 - forget_ratio) * new_candidate))


class SykoSmartMemoryGate(nn.Module):
    """Global-memory update: cross-attention summarizes the chunk, then a gate blends it in."""

    def __init__(self, d_model, num_heads=4):
        super().__init__()
        self.summarizer = nn.MultiheadAttention(d_model, num_heads, batch_first=True)
        self.forget_linear = nn.Linear(d_model * 3, d_model)
        self.update_linear = nn.Linear(d_model, d_model)
        self.norm = nn.LayerNorm(d_model)

    def forward(self, full_chunk_output, global_memory_output, prev_global_memory, context=None):
        # Global memory tokens attend over the full chunk output to form a summary.
        summary, _ = self.summarizer(query=global_memory_output,
                                     key=full_chunk_output, value=full_chunk_output)
        ctx = (context.mean(dim=1, keepdim=True).expand_as(summary)
               if context is not None else torch.zeros_like(summary))
        combined = torch.cat([summary, ctx, prev_global_memory], dim=-1)
        forget_ratio = torch.sigmoid(self.forget_linear(combined))
        new_candidate = torch.tanh(self.update_linear(summary))
        return self.norm((forget_ratio * prev_global_memory) + ((1 - forget_ratio) * new_candidate))


class SykoCausalHead(nn.Module):
    """Stacked Linear/GELU/LayerNorm blocks followed by a vocabulary projection."""

    def __init__(self, d_model, vocab_size, num_layers=2):
        super().__init__()
        layers = []
        for _ in range(num_layers):
            layers.extend([nn.Linear(d_model, d_model), nn.GELU(), nn.LayerNorm(d_model)])
        layers.append(nn.Linear(d_model, vocab_size))
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)


class SykoModalEmbedding(nn.Module):
    """Adds a learned modality embedding (0 = text, 1 = image, 2 = audio, 3 = reserved)."""

    def __init__(self, d_model):
        super().__init__()
        self.modal_embed = nn.Embedding(4, d_model)

    def forward(self, x, modal_type):
        return x + self.modal_embed(torch.tensor(modal_type, device=x.device))


class SykoOmni(PreTrainedModel):
    config_class = SykoConfig
    _tied_weights_keys = []  # no tied weights

    def __init__(self, config: SykoConfig):
        super().__init__(config)
        # Compatibility shim for transformers' weight-tying bookkeeping.
        if not hasattr(self, "all_tied_weights_keys"):
            self.all_tied_weights_keys = {}
        self.d_model = config.d_model
        self.mem_tokens = config.num_memory_tokens
        self.g_mem_tokens = config.num_global_memory_tokens

        self.text_embedding = nn.Embedding(config.text_vocab_size, config.d_model)
        self.image_embedding = nn.Embedding(config.image_vocab_size, config.d_model)
        self.audio_embedding = nn.Embedding(config.audio_vocab_size, config.d_model)

        # Positions cover global memory + chunk memory + context, with slack for modality features.
        self.pos_embedding = nn.Embedding(
            config.context_size + config.num_memory_tokens +
            config.num_global_memory_tokens + 50, config.d_model)

        encoder_layer = nn.TransformerEncoderLayer(
            d_model=config.d_model, nhead=config.n_heads,
            dim_feedforward=config.intermediate_size,
            batch_first=True, norm_first=True)
        self.transformer = nn.TransformerEncoder(
            encoder_layer, num_layers=config.n_layers,
            norm=nn.LayerNorm(config.d_model))
        self.memory_gate = SykoMemoryGate(config.d_model)
        self.global_memory_gate = SykoSmartMemoryGate(config.d_model, num_heads=4)

        self.text_head = nn.Linear(config.d_model, config.text_vocab_size)
        self.image_head = nn.Linear(config.d_model, config.image_vocab_size)
        self.audio_head = nn.Linear(config.d_model, config.audio_vocab_size)

        self.causal_head = SykoCausalHead(config.d_model, config.text_vocab_size,
                                          config.causal_reasoning_layers)
        self.modal_embedding = SykoModalEmbedding(config.d_model)

        # Frozen pretrained encoders; projections map their features into d_model.
        self.vision_encoder = SiglipVisionModel.from_pretrained("google/siglip-base-patch16-224")
        for p in self.vision_encoder.parameters():
            p.requires_grad = False
        self.vision_proj = nn.Sequential(
            nn.Linear(config.vision_hidden_size, config.d_model),
            nn.GELU(), nn.LayerNorm(config.d_model))

        self.audio_encoder = WhisperModel.from_pretrained("openai/whisper-tiny").encoder
        for p in self.audio_encoder.parameters():
            p.requires_grad = False
        self.audio_proj = nn.Sequential(
            nn.Linear(config.audio_hidden_size, config.d_model),
            nn.GELU(), nn.LayerNorm(config.d_model))

    def _adjust_tied_keys_with_tied_pointers(self, missing_keys):
        pass

    def _token_ids_to_embeddings(self, input_ids, text_vocab_size, image_vocab_size):
        """Route ids from the unified vocabulary to the per-modality embedding tables."""
        batch, seq = input_ids.shape
        embeddings = torch.zeros(batch, seq, self.d_model, device=input_ids.device)

        text_mask = input_ids < text_vocab_size
        if text_mask.any():
            safe = input_ids.clone()
            safe[~text_mask] = 0
            embeddings[text_mask] = self.text_embedding(safe)[text_mask]

        img_start = text_vocab_size
        img_end = text_vocab_size + image_vocab_size
        img_mask = (input_ids >= img_start) & (input_ids < img_end)
        if img_mask.any():
            ids = input_ids.clone()
            ids[~img_mask] = 0
            embeddings[img_mask] = self.image_embedding(
                (ids - img_start).clamp(0, image_vocab_size - 1))[img_mask]

        aud_mask = input_ids >= img_end
        if aud_mask.any():
            ids = input_ids.clone()
            ids[~aud_mask] = 0
            embeddings[aud_mask] = self.audio_embedding(
                (ids - img_end).clamp(0, self.config.audio_vocab_size - 1))[aud_mask]

        return embeddings

    def forward(self, input_ids, prev_memory, global_memory,
                chunk_start_idx=0, pixel_values=None, audio_features=None,
                generation_mode=None):
        cfg = self.config
        x = self._token_ids_to_embeddings(input_ids, cfg.text_vocab_size, cfg.image_vocab_size)
        x = self.modal_embedding(x, modal_type=0)

        # Prepend projected vision and audio features ahead of the text tokens.
        if pixel_values is not None:
            with torch.no_grad():
                vis_feats = self.vision_encoder(pixel_values=pixel_values).last_hidden_state
            x = torch.cat([self.modal_embedding(self.vision_proj(vis_feats), 1), x], dim=1)

        if audio_features is not None:
            with torch.no_grad():
                aud_feats = self.audio_encoder(audio_features).last_hidden_state
            x = torch.cat([self.modal_embedding(self.audio_proj(aud_feats), 2), x], dim=1)

        seq_len = x.size(1)
        # Sequence layout: [global memory | chunk memory | modality features + text].
        x_with_memory = torch.cat([global_memory, prev_memory, x], dim=1)

        g_pos = torch.arange(0, self.g_mem_tokens, device=input_ids.device)
        m_pos = torch.arange(self.g_mem_tokens, self.g_mem_tokens + self.mem_tokens, device=input_ids.device)
        t_pos = torch.arange(self.g_mem_tokens + self.mem_tokens + chunk_start_idx,
                             self.g_mem_tokens + self.mem_tokens + chunk_start_idx + seq_len,
                             device=input_ids.device)
        pos_ids = torch.cat([g_pos, m_pos, t_pos]).clamp(0, self.pos_embedding.num_embeddings - 1)
        x_with_memory = x_with_memory + self.pos_embedding(pos_ids.unsqueeze(0))

        causal_mask = nn.Transformer.generate_square_subsequent_mask(
            x_with_memory.size(1), device=input_ids.device)
        out = self.transformer(x_with_memory, mask=causal_mask)

        # Split the output back into global-memory, chunk-memory, and token slices.
        gmo = out[:, :self.g_mem_tokens, :]
        mo = out[:, self.g_mem_tokens:self.g_mem_tokens + self.mem_tokens, :]
        to_ = out[:, self.g_mem_tokens + self.mem_tokens:, :]

        if generation_mode == 'image':
            logits = self.image_head(to_)
        elif generation_mode == 'audio':
            logits = self.audio_head(to_)
        else:
            logits = self.text_head(to_)

        # Number of prepended (non-text) positions in the token slice.
        # NOTE: self.causal_head is constructed above but not applied here; the first
        # two outputs are the same logits tensor.
        non_text_len = seq_len - input_ids.size(1)
        return (logits, logits,
                self.memory_gate(mo, prev_memory),
                self.global_memory_gate(to_, gmo, global_memory, context=to_),
                non_text_len)
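A hypothetical end-to-end sketch (not part of this upload) of the chunked, memory-threaded forward pass. It assumes the two files live in a package (the relative import in modeling_sykoomni.py requires one; the package name sykoomni below is made up), that the frozen SigLIP and Whisper encoders can be downloaded at construction time, and that zero-initialized memory tensors are acceptable:

import torch
from sykoomni.configuration_sykoomni import SykoConfig
from sykoomni.modeling_sykoomni import SykoOmni

config = SykoConfig()
model = SykoOmni(config).eval()  # downloads the frozen SigLIP and Whisper encoders

# A long token stream, processed chunk_size tokens at a time.
ids = torch.randint(0, config.text_vocab_size, (1, 4 * config.chunk_size))
prev_memory = torch.zeros(1, config.num_memory_tokens, config.d_model)           # assumed init
global_memory = torch.zeros(1, config.num_global_memory_tokens, config.d_model)  # assumed init

with torch.no_grad():
    for start in range(0, ids.size(1), config.chunk_size):
        chunk = ids[:, start:start + config.chunk_size]
        # Each call returns updated memories, which are threaded into the next chunk.
        logits, _, prev_memory, global_memory, _ = model(
            chunk, prev_memory, global_memory, chunk_start_idx=start)

print(logits.shape)  # (1, chunk_size, text_vocab_size)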