import torch
import torch.nn as nn
from transformers import DistilBertModel, DistilBertConfig
from diffusers import UNet2DConditionModel

class VideoJEPA(nn.Module):
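    """Joint video-text model in the JEPA style: a hierarchical 3D CNN
    encodes video, DistilBERT encodes text, and a Transformer encoder
    fuses both into a shared context that a conditional UNet can attend
    to at generation time.
    """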
    def __init__(self, text_dim=768, video_dim=512, latent_dim=1024):
        super().__init__()
        
        # Video Encoder (Hierarchical 3D CNN). Temporal padding keeps the
        # frame count T unchanged, so the per-frame reshape in forward() holds.
        self.video_encoder = nn.Sequential(
            nn.Conv3d(3, 64, kernel_size=(3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2)),
            nn.ReLU(),
            nn.MaxPool3d((1, 2, 2)),
            nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.AdaptiveAvgPool3d((None, 8, 8))  # pool space to 8x8, keep time
        )
        self.video_proj = nn.Linear(128 * 8 * 8, video_dim)
        # Map per-frame video features into the shared latent space so they
        # can be concatenated with the projected text embedding below.
        self.video_to_latent = nn.Linear(video_dim, latent_dim)
        
        # Text Encoder (DistilBERT)
        self.text_encoder = DistilBertModel.from_pretrained("distilbert-base-uncased")
        self.text_proj = nn.Linear(text_dim, latent_dim)
        
        # Joint Fusion Transformer. batch_first=True matches the
        # (B, sequence, latent_dim) layout produced in forward().
        self.fusion_transformer = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(d_model=latent_dim, nhead=8, batch_first=True),
            num_layers=4
        )
        
        # Diffusion Decoder (Conditional UNet). Not invoked in forward();
        # it is set up to consume the fused context via cross-attention
        # (hence cross_attention_dim=latent_dim) at generation time.
        self.diffusion_decoder = UNet2DConditionModel(
            sample_size=64,
            in_channels=3,
            out_channels=3,
            cross_attention_dim=latent_dim
        )

    def forward(self, video, text_input):
        # Video encoding: (B, C, T, H, W) -> (B, T, latent_dim)
        B, C, T, H, W = video.shape
        video_features = self.video_encoder(video)          # (B, 128, T, 8, 8)
        video_features = video_features.permute(0, 2, 1, 3, 4).contiguous()
        video_features = video_features.view(B * T, -1)     # (B*T, 128*8*8)
        video_emb = self.video_proj(video_features).view(B, T, -1)
        video_emb = self.video_to_latent(video_emb)         # (B, T, latent_dim)
        
        # Text encoding: project the [CLS] token into the latent space
        text_emb = self.text_encoder(**text_input).last_hidden_state
        text_emb = self.text_proj(text_emb[:, 0])           # (B, latent_dim)
        
        # Contextual fusion: append the text token to the video tokens
        fused_emb = torch.cat([video_emb, text_emb.unsqueeze(1)], dim=1)  # (B, T+1, latent_dim)
        context_emb = self.fusion_transformer(fused_emb)
        
        return context_emb
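

if __name__ == "__main__":
    # Minimal smoke test: a sketch of how the pieces fit together, not the
    # author's confirmed training loop. Assumes distilbert-base-uncased
    # weights are available, and uses illustrative shapes (2 clips of
    # 8 frames at 64x64 resolution).
    from transformers import DistilBertTokenizer

    model = VideoJEPA()
    tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

    video = torch.randn(2, 3, 8, 64, 64)  # (B, C, T, H, W)
    text_input = tokenizer(
        ["a cat jumps onto a table", "rain falls on a window"],
        return_tensors="pt", padding=True
    )

    context_emb = model(video, text_input)
    print(context_emb.shape)  # torch.Size([2, 9, 1024]): T video tokens + 1 text token

    # One plausible use of the diffusion decoder (an assumption, since
    # forward() never calls it): predict noise on a frame, conditioned on
    # the fused context via cross-attention.
    noisy_frames = torch.randn(2, 3, 64, 64)
    timesteps = torch.randint(0, 1000, (2,))
    noise_pred = model.diffusion_decoder(
        noisy_frames, timesteps, encoder_hidden_states=context_emb
    ).sample
    print(noise_pred.shape)  # torch.Size([2, 3, 64, 64])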