import torch
import torch.nn as nn
class ResidualBlock(nn.Module):
    """Pre-norm residual MLP block: returns x + MLP(LayerNorm(x))."""

    def __init__(self, dim):
        super().__init__()
        # Normalize before the MLP (pre-LN layout), then project dim -> dim -> dim.
        self.ln = nn.LayerNorm(dim)
        layers = [
            nn.Linear(dim, dim),
            nn.GELU(),
            nn.Linear(dim, dim),
        ]
        self.fc = nn.Sequential(*layers)

    def forward(self, x):
        # Skip connection around the normalized MLP path.
        residual = x
        out = self.ln(x)
        out = self.fc(out)
        return residual + out
class SADIM_V2_Expert(nn.Module):
    """Variational autoencoder over fixed-size feature vectors.

    Encoder maps the input to a hidden representation, from which
    ``fc_mu`` / ``fc_logvar`` produce the latent Gaussian parameters;
    a reparameterized sample is then decoded back to input space.

    Args:
        input_dim: Size of each input (and reconstructed) vector.
        latent_dim: Size of the latent code.
        hidden_dim: Width of the encoder/decoder hidden layers.
            Defaults to 3584, the value previously hard-coded.
    """

    def __init__(self, input_dim=12, latent_dim=64, hidden_dim=3584):
        super().__init__()
        h = hidden_dim
        self.encoder = nn.Sequential(
            nn.Linear(input_dim, h),
            nn.LayerNorm(h),
            nn.GELU(),
            nn.Linear(h, h),
            nn.LayerNorm(h),
            nn.GELU(),
            ResidualBlock(h),
        )
        self.fc_mu = nn.Linear(h, latent_dim)
        self.fc_logvar = nn.Linear(h, latent_dim)
        self.decoder = nn.Sequential(
            nn.Linear(latent_dim, h),
            nn.GELU(),
            nn.Linear(h, h),
            nn.LayerNorm(h),
            nn.GELU(),
            ResidualBlock(h),
            nn.Linear(h, input_dim),
        )

    def forward(self, x):
        """Encode, sample via the reparameterization trick, and decode.

        Returns:
            Tuple ``(reconstruction, mu, logvar)`` where ``reconstruction``
            has the same trailing dimension as ``x`` and ``mu``/``logvar``
            have trailing dimension ``latent_dim``.
        """
        h = self.encoder(x)
        mu, logvar = self.fc_mu(h), self.fc_logvar(h)
        # Clamp for numerical stability: exp(0.5 * logvar) overflows for
        # large logvar and collapses to 0 for very negative values.
        logvar = torch.clamp(logvar, -10, 10)
        std = torch.exp(0.5 * logvar)
        # Reparameterization trick: z = mu + eps * std, eps ~ N(0, I).
        # NOTE(review): sampling also happens in eval mode — confirm whether
        # deterministic inference (z = mu when not self.training) is wanted.
        z = mu + torch.randn_like(mu) * std
        return self.decoder(z), mu, logvar
# --- Preprocessing guide ---
# For correct results, normalize inputs with these divisors:
# ra, l: /360.0 | dec, b: /90.0 | d_pc: /10000.0
# pmra, pmdec: /500.0 | x, y, z: /15000.0