Feature Extraction
Transformers
PyTorch
English
samfatnassi committed on
Commit
8564004
·
verified ·
1 Parent(s): 74d9cd0

Upload architecture.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. architecture.py +53 -0
architecture.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import torch
3
+ import torch.nn as nn
4
+
5
class ResidualBlock(nn.Module):
    """Pre-norm residual MLP block: returns ``x + MLP(LayerNorm(x))``.

    The feed-forward path keeps the feature width at ``dim`` throughout,
    so the skip connection adds tensors of identical shape.
    """

    def __init__(self, dim):
        super().__init__()
        # Normalize before the transform (pre-norm residual style).
        self.ln = nn.LayerNorm(dim)
        # Two-layer feed-forward with a GELU nonlinearity in between.
        self.fc = nn.Sequential(
            nn.Linear(dim, dim),
            nn.GELU(),
            nn.Linear(dim, dim),
        )

    def forward(self, x):
        normed = self.ln(x)
        return x + self.fc(normed)
16
+
17
class SADIM_V2_Expert(nn.Module):
    """VAE-style expert model: encode -> Gaussian latent -> sample -> decode.

    The encoder maps ``input_dim`` features to a wide hidden representation,
    from which ``fc_mu`` / ``fc_logvar`` produce the parameters of a diagonal
    Gaussian latent. ``forward`` draws a reparameterized sample and decodes
    it back to ``input_dim`` outputs.

    Args:
        input_dim: Size of the input (and reconstructed output) vector.
        latent_dim: Size of the latent Gaussian.
        hidden_dim: Width of the hidden layers. Previously hard-coded to
            3584 inside ``__init__``; kept as the default so existing
            callers and checkpoints are unaffected.
    """

    def __init__(self, input_dim=12, latent_dim=64, hidden_dim=3584):
        super().__init__()
        h = hidden_dim
        self.encoder = nn.Sequential(
            nn.Linear(input_dim, h),
            nn.LayerNorm(h),
            nn.GELU(),
            nn.Linear(h, h),
            nn.LayerNorm(h),
            nn.GELU(),
            ResidualBlock(h),
        )
        # Heads producing the mean and log-variance of the latent Gaussian.
        self.fc_mu = nn.Linear(h, latent_dim)
        self.fc_logvar = nn.Linear(h, latent_dim)
        self.decoder = nn.Sequential(
            nn.Linear(latent_dim, h),
            nn.GELU(),
            nn.Linear(h, h),
            nn.LayerNorm(h),
            nn.GELU(),
            ResidualBlock(h),
            nn.Linear(h, input_dim),
        )

    def forward(self, x):
        """Encode ``x``, sample a latent, and decode it.

        Returns:
            Tuple ``(reconstruction, mu, logvar)`` where ``reconstruction``
            has the same trailing dimension as ``x`` and ``mu`` / ``logvar``
            have trailing dimension ``latent_dim``.

        NOTE(review): sampling happens unconditionally — the model is
        stochastic in eval mode as well; confirm this is intended.
        """
        enc = self.encoder(x)
        mu, logvar = self.fc_mu(enc), self.fc_logvar(enc)
        # Clamp log-variance for numerical stability of exp().
        logvar = torch.clamp(logvar, -10, 10)
        std = torch.exp(0.5 * logvar)
        # Reparameterization trick: z = mu + eps * std, eps ~ N(0, I).
        z = mu + torch.randn_like(mu) * std
        return self.decoder(z), mu, logvar
49
+
50
+ # --- Preprocessing Guide ---
51
+ # To get correct results tomorrow, use these divisors:
52
+ # ra, l: /360.0 | dec, b: /90.0 | d_pc: /10000.0
53
+ # pmra, pmdec: /500.0 | x, y, z: /15000.0