Elliotasdasdasfasas committed
Commit 99f6209 · 1 Parent(s): 77e67bb

v10.1.6: Restore missing PositionalEncoding class

Files changed (1):
  app.py  +21 -0
app.py CHANGED
@@ -75,6 +75,27 @@ device = None
 MAX_DIM = 2048
 DEFAULT_DIM = 1024
 
+# ============================================================================
+# L18.5: VL-JEPA PAPER-FAITHFUL IMPLEMENTATION
+# ============================================================================
+
+class PositionalEncoding(nn.Module):
+    """Positional encoding for Transformer (paper-style)."""
+    def __init__(self, d_model, max_len=512, dropout=0.1):
+        super().__init__()
+        self.dropout = nn.Dropout(p=dropout)
+
+        pe = torch.zeros(max_len, d_model)
+        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
+        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
+        pe[:, 0::2] = torch.sin(position * div_term)
+        pe[:, 1::2] = torch.cos(position * div_term)
+        self.register_buffer('pe', pe.unsqueeze(0))
+
+    def forward(self, x):
+        x = x + self.pe[:, :x.size(1)]
+        return self.dropout(x)
+
 class VLJEPAPredictor(nn.Module):
     """
     VL-JEPA Predictor v10.1 (Paper-Faithful Sequence)
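
For reference, a minimal usage sketch of the restored class (hypothetical; not part of the commit). It assumes the imports the class relies on (math, torch, torch.nn) are present at the top of app.py, and it repeats the definition from the diff above so the snippet runs standalone; the tensor shapes and d_model=1024 (matching DEFAULT_DIM in the surrounding context lines) are illustrative values only:

    import math

    import torch
    import torch.nn as nn

    # Definition copied from the diff above.
    class PositionalEncoding(nn.Module):
        """Positional encoding for Transformer (paper-style)."""
        def __init__(self, d_model, max_len=512, dropout=0.1):
            super().__init__()
            self.dropout = nn.Dropout(p=dropout)

            # Precompute the sinusoidal table: sin on even dims, cos on odd dims.
            pe = torch.zeros(max_len, d_model)
            position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
            div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
            pe[:, 0::2] = torch.sin(position * div_term)
            pe[:, 1::2] = torch.cos(position * div_term)
            self.register_buffer('pe', pe.unsqueeze(0))  # shape: (1, max_len, d_model)

        def forward(self, x):
            # x: (batch, seq_len, d_model); add the first seq_len position encodings.
            x = x + self.pe[:, :x.size(1)]
            return self.dropout(x)

    # Hypothetical example values for illustration.
    pos_enc = PositionalEncoding(d_model=1024, max_len=512, dropout=0.1)
    tokens = torch.randn(2, 16, 1024)   # (batch=2, seq_len=16, d_model=1024)
    out = pos_enc(tokens)               # same shape, with sinusoidal encoding added
    assert out.shape == (2, 16, 1024)

Because the table is registered as a buffer rather than a parameter, it moves with the module across devices and is saved in checkpoints but receives no gradient updates, which matches the fixed (non-learned) sinusoidal scheme of the original Transformer paper.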