from typing import Tuple, List
import gc
import tracemalloc

import torch
from torch import nn, LongTensor, FloatTensor, BoolTensor

from .dalle_bart_encoder import GLU, AttentionBase

# One image is decoded as a fixed sequence of 256 VQGAN tokens (a 16 x 16 grid).
IMAGE_TOKEN_COUNT = 256

class DecoderCrossAttention(AttentionBase):
    def forward(
        self,
        decoder_state: FloatTensor,
        encoder_state: FloatTensor,
        attention_mask: BoolTensor
    ) -> FloatTensor:
        # Keys and values are projected from the (fixed) encoder output;
        # only the queries depend on the current decoder state.
        keys = self.k_proj.forward(encoder_state)
        values = self.v_proj.forward(encoder_state)
        queries = self.q_proj.forward(decoder_state)
        return super().forward(keys, values, queries, attention_mask)

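# Note on the key/value cache used by DecoderSelfAttention below. The layout
# is inferred from the indexing in its forward pass, not documented in this
# file: for a decoder batch of N rows, `attention_state` has shape
# (2 * N, IMAGE_TOKEN_COUNT, embed_count), with the cached keys stacked in
# the first N rows along dim 0 and the cached values in the last N. Each
# decoding step therefore projects only the single new token and writes its
# key/value pair into the slot at `token_index`.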
class DecoderSelfAttention(AttentionBase):
    def forward(
        self,
        decoder_state: FloatTensor,
        attention_state: FloatTensor,
        attn_mask: BoolTensor,
        token_index: LongTensor
    ) -> Tuple[FloatTensor, FloatTensor]:
        keys = self.k_proj.forward(decoder_state)
        values = self.v_proj.forward(decoder_state)
        queries = self.q_proj.forward(decoder_state)
        # Write this step's keys and values into the cache at token_index,
        # then attend over all cached positions.
        attn_state_new = torch.cat([keys, values]).to(attention_state.dtype)
        attention_state[:, token_index] = attn_state_new
        batch_count = decoder_state.shape[0]
        keys = attention_state[:batch_count]
        values = attention_state[batch_count:]
        decoder_state = super().forward(keys, values, queries, attn_mask)
        return decoder_state, attention_state

class DecoderLayer(nn.Module):
    def __init__(
        self,
        head_count: int,
        embed_count: int,
        glu_embed_count: int,
        device: str
    ):
        super().__init__()
        self.pre_self_attn_layer_norm = nn.LayerNorm(embed_count)
        self.self_attn = DecoderSelfAttention(head_count, embed_count)
        self.self_attn_layer_norm = nn.LayerNorm(embed_count)
        self.pre_encoder_attn_layer_norm = nn.LayerNorm(embed_count)
        self.encoder_attn = DecoderCrossAttention(head_count, embed_count)
        self.encoder_attn_layer_norm = nn.LayerNorm(embed_count)
        self.glu = GLU(embed_count, glu_embed_count)
        self.token_indices = torch.arange(IMAGE_TOKEN_COUNT, device=device)

    def forward(
        self,
        decoder_state: FloatTensor,
        encoder_state: FloatTensor,
        attention_state: FloatTensor,
        attention_mask: BoolTensor,
        token_index: LongTensor
    ) -> Tuple[FloatTensor, FloatTensor]:
        # Self attention: a causal mask admitting positions 0..token_index,
        # repeated across the batch (e.g. for token_index == 2 each row is
        # [True, True, True, False, ...]).
        self_attn_mask = self.token_indices < token_index + 1
        self_attn_mask = self_attn_mask[None][[0] * decoder_state.shape[0]]
        residual = decoder_state
        decoder_state = self.pre_self_attn_layer_norm.forward(decoder_state)
        decoder_state, attention_state = self.self_attn.forward(
            decoder_state=decoder_state,
            attention_state=attention_state,
            attn_mask=self_attn_mask,
            token_index=token_index
        )
        decoder_state = self.self_attn_layer_norm.forward(decoder_state)
        decoder_state = residual + decoder_state

        # Cross attention over the encoder output
        residual = decoder_state
        decoder_state = self.pre_encoder_attn_layer_norm.forward(decoder_state)
        decoder_state = self.encoder_attn.forward(
            decoder_state=decoder_state,
            encoder_state=encoder_state,
            attention_mask=attention_mask
        )
        decoder_state = self.encoder_attn_layer_norm.forward(decoder_state)
        decoder_state = residual + decoder_state

        # Feed forward (gated linear unit)
        residual = decoder_state
        decoder_state = self.glu.forward(decoder_state)
        decoder_state = residual + decoder_state
        return decoder_state, attention_state

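# Note the normalization pattern above: each attention sub-block applies a
# LayerNorm both before and after its transform, with the residual added
# only after the second norm. The GLU feed-forward block imported from
# dalle_bart_encoder gets no norms here, so it presumably normalizes
# internally.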
class DalleBartDecoder(nn.Module):
    def __init__(
        self,
        image_vocab_count: int,
        embed_count: int,
        attention_head_count: int,
        glu_embed_count: int,
        layer_count: int,
        device: str
    ):
        super().__init__()
        self.layer_count = layer_count
        self.embed_count = embed_count
        self.image_vocab_count = image_vocab_count
        self.embed_tokens = nn.Embedding(image_vocab_count + 1, embed_count)
        self.embed_positions = nn.Embedding(IMAGE_TOKEN_COUNT, embed_count)
        self.layers: List[DecoderLayer] = nn.ModuleList([
            DecoderLayer(
                head_count=attention_head_count,
                embed_count=embed_count,
                glu_embed_count=glu_embed_count,
                device=device
            )
            for _ in range(layer_count)
        ])
        self.layernorm_embedding = nn.LayerNorm(embed_count)
        self.final_ln = nn.LayerNorm(embed_count)
        self.lm_head = nn.Linear(embed_count, image_vocab_count + 1, bias=False)
        self.token_indices = torch.arange(IMAGE_TOKEN_COUNT, device=device)
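
    # Expected argument shapes for one decoding step, inferred from the code
    # below (not documented in the original):
    #   settings:        (3,)  -> [temperature, top_k, supercondition_factor]
    #   attention_mask:  (2 * image_count, text_token_count)
    #   encoder_state:   (2 * image_count, text_token_count, embed_count)
    #   attention_state: (layer_count, 4 * image_count, IMAGE_TOKEN_COUNT, embed_count)
    #   prev_tokens:     (image_count,)
    #   token_index:     (1,)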
    def forward(
        self,
        settings: FloatTensor,
        attention_mask: BoolTensor,
        encoder_state: FloatTensor,
        attention_state: FloatTensor,
        prev_tokens: LongTensor,
        token_index: LongTensor
    ) -> Tuple[LongTensor, FloatTensor]:
        # The batch holds two copies of each image: an unconditioned branch
        # and a text-conditioned branch, blended below by the
        # supercondition factor.
        image_count = encoder_state.shape[0] // 2
        token_index_batched = token_index[[0] * image_count * 2]
        prev_tokens = prev_tokens[list(range(image_count)) * 2]
        prev_tokens.clamp_(0, self.image_vocab_count)
        decoder_state = self.embed_tokens.forward(prev_tokens)
        decoder_state += self.embed_positions.forward(token_index_batched)
        decoder_state = self.layernorm_embedding.forward(decoder_state)
        decoder_state = decoder_state[:, None]

        # Memory tracing for debugging; note that tracemalloc only tracks
        # Python-side (CPU) allocations, not CUDA memory.
        tracemalloc.start()
        print("--")
        print("after embedding:", tracemalloc.get_traced_memory())
        for i in range(self.layer_count):
            decoder_state, attention_state[i] = self.layers[i].forward(
                decoder_state,
                encoder_state,
                attention_state[i],
                attention_mask,
                token_index
            )
        print("after layers:", tracemalloc.get_traced_memory())
        decoder_state = self.final_ln(decoder_state)
        logits = self.lm_head(decoder_state)
        print("after lm_head:", tracemalloc.get_traced_memory())
        del decoder_state

        temperature = settings[[0]]
        top_k = settings[[1]].to(torch.long)
        print("after settings:", tracemalloc.get_traced_memory())
        supercondition_factor = settings[[2]]
        # Keep only the final position's logits over the image vocabulary
        # (2 ** 14 == 16384, presumably equal to image_vocab_count), then
        # blend the unconditioned and conditioned branches.
        logits = logits[:, -1, : 2 ** 14]
        logits: FloatTensor = (
            logits[:image_count] * (1 - supercondition_factor) +
            logits[image_count:] * supercondition_factor
        )
        print("after supercondition:", tracemalloc.get_traced_memory())
        del supercondition_factor

        # Top-k filtering followed by temperature sampling; intermediates
        # are deleted eagerly to keep peak memory down.
        logits_sorted, _ = logits.sort(descending=True)
        is_kept = logits >= logits_sorted[:, top_k - 1]
        del top_k
        logits -= logits_sorted[:, [0]]
        del logits_sorted
        logits /= temperature
        del temperature
        logits.exp_()
        logits *= is_kept.to(torch.float32)
        del is_kept
        image_tokens = torch.multinomial(logits, 1)[:, 0]
        del logits
        gc.collect()
        print("after sampling:", tracemalloc.get_traced_memory())
        return image_tokens, attention_state
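

if __name__ == "__main__":
    # Minimal smoke test for one decoding step, with random weights and a
    # fabricated encoder state. All sizes here are illustrative assumptions,
    # far smaller than the real checkpoint's dimensions. Because of the
    # relative import at the top, run this as a module from the repository
    # root, e.g. `python -m min_dalle.models.dalle_bart_decoder` (adjust to
    # wherever this file actually lives in the package).
    device = "cpu"
    image_count = 2
    text_token_count = 8
    layer_count = 2
    embed_count = 16
    decoder = DalleBartDecoder(
        image_vocab_count=2 ** 14,
        embed_count=embed_count,
        attention_head_count=4,
        glu_embed_count=32,
        layer_count=layer_count,
        device=device
    )
    # [temperature, top_k, supercondition_factor]
    settings = torch.tensor([1.0, 128.0, 0.5])
    attention_mask = torch.ones(2 * image_count, text_token_count, dtype=torch.bool)
    encoder_state = torch.randn(2 * image_count, text_token_count, embed_count)
    attention_state = torch.zeros(
        layer_count, 4 * image_count, IMAGE_TOKEN_COUNT, embed_count
    )
    # Start from the assumed start-of-sequence token (index image_vocab_count,
    # the extra row in the token embedding).
    prev_tokens = torch.full((image_count,), 2 ** 14, dtype=torch.long)
    token_index = torch.zeros(1, dtype=torch.long)
    image_tokens, attention_state = decoder.forward(
        settings, attention_mask, encoder_state,
        attention_state, prev_tokens, token_index
    )
    print("sampled tokens:", image_tokens)  # shape: (image_count,)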