ayda138000 commited on
Commit
ad8eda7
·
verified ·
1 Parent(s): ca936f6

Upload 8 files

Browse files
Files changed (8) hide show
  1. README.md +48 -3
  2. __init__.py +3 -0
  3. dualmaxwell.pth +3 -0
  4. example_usage.py +157 -0
  5. hubconf.py +17 -0
  6. losses.py +93 -0
  7. models.py +117 -0
  8. setup.py +28 -0
README.md CHANGED
@@ -1,3 +1,48 @@
1
- ---
2
- license: mit
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # pinn_electromagnetics
3
+
4
+ A Python package for simulating electromagnetic fields using Physics-Informed Neural Networks (PINNs).
5
+
6
+ This package implements custom neural network architectures and loss functions to solve Maxwell's equations, particularly focusing on wave propagation problems in complex geometries.
7
+
8
+ ## Installation
9
+
10
+ To install the package, navigate to the root directory of the package and run:
11
+
12
+ ```bash
13
+ pip install .
14
+ ```
15
+
16
+ ## Usage
17
+
18
+ See `example_usage.py` for a detailed example of how to use the models and loss functions, load pre-trained weights, and visualize results.
19
+
20
+ ## Models Included
21
+
22
+ - `HashGridEncoder`: Efficient multi-resolution hash grid encoder for spatial coordinates.
23
+ - `GeoNetHash`: A geometry network that learns Signed Distance Functions (SDFs) of complex objects using `HashGridEncoder`.
24
+ - `PositionalEncoder4D`: A standard positional encoder for 4D (x, y, z, t) inputs.
25
+ - `MaxwellPINN`: The core physics-informed neural network that predicts electromagnetic fields (E, B) based on space and time coordinates.
26
+
27
+ ## Loss Functions Included
28
+
29
+ - `compute_all_derivatives`: Helper function to compute all necessary partial derivatives of E and B fields with respect to x, y, z, and t.
30
+ - `compute_maxwell_loss`: Implements the physics-based loss derived from Maxwell's equations (Gauss's laws, Faraday's law, Ampere-Maxwell law).
31
+ - `compute_data_loss`: Computes the data-driven loss from initial conditions (E0, B0) and source boundary conditions (e.g., oscillating E-field at an antenna).
32
+
33
+ ## Data Format
34
+
35
+ The `example_usage.py` script expects a `ground_truth.npz` file containing:
36
+
37
+ - `coords`: (N, 3) array of 3D coordinates.
38
+ - `sdf`: (N,) array of Signed Distance Function values for `coords`.
39
+ - `E0`: (N, 3) array of initial E-field values at `coords`.
40
+ - `B0`: (N, 3) array of initial B-field values at `coords`.
41
+ - `source_position`: (3,) array for the (x,y,z) coordinates of the antenna source.
42
+ - `source_orientation`: (3,) array for the orientation of the source E-field (e.g., [1,0,0] for x-polarized).
43
+ - `source_signal`: (T_steps,) array for the time-varying signal of the source.
44
+ - `t`: (T_steps,) array of time points corresponding to `source_signal`.
45
+
46
+ ## License
47
+
48
+ MIT License
__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
"""Public API of the pinn_electromagnetics package."""

# NOTE: the original `from .models import DualNetwork` was removed — models.py
# added in the same commit defines no `DualNetwork`, so importing the package
# would fail with ImportError before any other name became usable.
from .models import HashGridEncoder, GeoNetHash, PositionalEncoder4D, MaxwellPINN
from .losses import compute_all_derivatives, compute_maxwell_loss, compute_data_loss

__all__ = [
    "HashGridEncoder",
    "GeoNetHash",
    "PositionalEncoder4D",
    "MaxwellPINN",
    "compute_all_derivatives",
    "compute_maxwell_loss",
    "compute_data_loss",
]
dualmaxwell.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f03307c4ea1b4e5fd84cebb7773e2ab2827ab774c19eadf6718dd285ff7aa40a
3
+ size 174347
example_usage.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Example usage of the pinn_electromagnetics package.

Loads ground-truth data and pre-trained checkpoints, evaluates the physics
(Maxwell) and data losses on a random subset, and renders an X-Z slice of the
predicted Ex field. Requires `ground_truth.npz` and the checkpoint files in
MODEL_DIR.
"""

import os

import torch
import numpy as np
import matplotlib.pyplot as plt

# Import models and losses from the package.
from pinn_electromagnetics.models import GeoNetHash, MaxwellPINN
from pinn_electromagnetics.losses import compute_all_derivatives, compute_maxwell_loss, compute_data_loss

# --- Configuration Parameters ---
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Paths to pre-trained model weights. On Google Colab these may need to point
# to a Drive mount, e.g. '/content/drive/MyDrive/Colab_PINN_Projects/'.
# NOTE(review): hubconf.py downloads *_v34 checkpoints — confirm which
# checkpoint filenames actually ship alongside this script.
MODEL_DIR = "."
GEO_MODEL_PATH = os.path.join(MODEL_DIR, "geonet_real_v30.pth")
PHYS_MODEL_PATH = os.path.join(MODEL_DIR, "physnet_v31_real.pth")
NPZ_FILE_PATH = os.path.join(MODEL_DIR, "ground_truth.npz")

# Simulation parameters (should match training parameters).
BOX_SIZE_GEO = 2.0  # The full extent of geometry data, e.g., [-2, 2]

# --- 1. Load Data ---
print(f"Loading data from {NPZ_FILE_PATH}...")
try:
    ground_truth_data = np.load(NPZ_FILE_PATH)
    coords_np = ground_truth_data['coords']
    sdf_np = ground_truth_data['sdf']
    E0_np = ground_truth_data['E0']
    B0_np = ground_truth_data['B0']
    source_pos_np = ground_truth_data['source_position']
    source_orientation_np = ground_truth_data['source_orientation']
    source_signal_np = ground_truth_data['source_signal']
    t_np = ground_truth_data['t']

    # Convert to PyTorch tensors.
    coords_tensor = torch.tensor(coords_np, dtype=torch.float32).to(DEVICE)
    sdf_tensor = torch.tensor(sdf_np, dtype=torch.float32).view(-1, 1).to(DEVICE)
    E0_tensor = torch.tensor(E0_np, dtype=torch.float32).to(DEVICE)
    B0_tensor = torch.tensor(B0_np, dtype=torch.float32).to(DEVICE)
    source_pos_tensor = torch.tensor(source_pos_np, dtype=torch.float32).to(DEVICE)
    source_orientation_tensor = torch.tensor(source_orientation_np, dtype=torch.float32).to(DEVICE)
    source_signal_tensor = torch.tensor(source_signal_np, dtype=torch.float32).to(DEVICE)
    t_tensor = torch.tensor(t_np, dtype=torch.float32).to(DEVICE)

    # The loss functions expect (T, 1) column vectors for time-like arrays.
    if t_tensor.dim() == 1:
        t_tensor = t_tensor.view(-1, 1)
    if source_signal_tensor.dim() == 1:
        source_signal_tensor = source_signal_tensor.view(-1, 1)

    T_MAX = t_tensor.max().item()
    print("Data loaded successfully.")
except FileNotFoundError:
    print(f"Error: {NPZ_FILE_PATH} not found. Please ensure it's in the correct directory.")
    raise SystemExit(1)  # exit() is site-module dependent; SystemExit is not
except Exception as e:
    print(f"Error loading NPZ data: {e}")
    raise SystemExit(1)

# --- 2. Load Models ---
print("Loading pre-trained models...")

# GeoNet (geometry network).
model_geo = GeoNetHash().to(DEVICE)
# normalize_coords stores self.min/max, which every later normalization
# relies on; call it once with the FULL dataset before inference.
model_geo.normalize_coords(coords_tensor)
model_geo.load_state_dict(torch.load(GEO_MODEL_PATH, map_location=DEVICE))
model_geo.eval()

# MaxwellPINN (physics network).
model_phys = MaxwellPINN(num_freqs=8, hidden_dim=128).to(DEVICE)
model_phys.load_state_dict(torch.load(PHYS_MODEL_PATH, map_location=DEVICE))
model_phys.eval()

print("Models loaded successfully.")

# --- 3. Example Usage: Compute Losses on a Subset of Data (Optional) ---
# Fixed: the original string literal was split across a physical newline
# (a SyntaxError); use an explicit \n escape instead.
print("\n--- Computing example losses (physics and data) ---")
# For demonstration, take a small random subset of points.
n_test_points = 1024
idx_test_colloc = torch.randperm(coords_tensor.shape[0], device=DEVICE)[:n_test_points * 2]
coords_test_all = coords_tensor[idx_test_colloc]

with torch.no_grad():
    # NOTE(review): normalize_coords re-derives min/max from this SUBSET,
    # overwriting the full-dataset statistics set above — confirm this is
    # intended; applying the stored stats directly would be more consistent.
    coords_test_norm = model_geo.normalize_coords(coords_test_all)
    sdf_vals_test = model_geo(coords_test_norm)
    mask_outside_cube = (sdf_vals_test > 0.05).flatten()
    coords_test_colloc = coords_test_all[mask_outside_cube][:n_test_points]

# Collocation inputs need gradients for the physics residuals, so these are
# created outside the no_grad block.
x_c = coords_test_colloc[:, 0:1].requires_grad_(True)
y_c = coords_test_colloc[:, 1:2].requires_grad_(True)
z_c = coords_test_colloc[:, 2:3].requires_grad_(True)
t_c = (torch.rand(coords_test_colloc.shape[0], 1, device=DEVICE) * T_MAX).requires_grad_(True)

# Physics loss (Maxwell residuals).
_, derivs = compute_all_derivatives(model_phys, x_c, y_c, z_c, t_c)
loss_physics_example = compute_maxwell_loss(derivs)
print(f"Example Physics Loss: {loss_physics_example.item():.6f}")

# Data loss (initial conditions + source boundary).
loss_data_example = compute_data_loss(model_phys,
                                      coords_tensor, E0_tensor, B0_tensor,
                                      source_pos_tensor, source_orientation_tensor,
                                      source_signal_tensor, t_tensor,
                                      n_samples_ic=256, n_samples_source=t_tensor.shape[0])
print(f"Example Data Loss: {loss_data_example.item():.6f}")

# --- 4. Visualization ---
print("\n--- Generating visualization of predicted Ex field ---")

with torch.no_grad():
    resolution = 100
    min_vals = coords_tensor.min(dim=0)[0]
    max_vals = coords_tensor.max(dim=0)[0]

    # Slice in the X-Z plane (at the center of Y).
    x_range = np.linspace(min_vals[0].item(), max_vals[0].item(), resolution)
    z_range = np.linspace(min_vals[2].item(), max_vals[2].item(), resolution)
    xx, zz = np.meshgrid(x_range, z_range)
    yy = np.full_like(xx, (min_vals[1] + max_vals[1]).item() / 2.0)

    # Visualize at 75% of the total simulation time.
    T_VISUALIZATION = T_MAX * 0.75
    tt = np.ones_like(xx) * T_VISUALIZATION

    x_grid = torch.tensor(xx.flatten(), dtype=torch.float32).to(DEVICE).view(-1, 1)
    y_grid = torch.tensor(yy.flatten(), dtype=torch.float32).to(DEVICE).view(-1, 1)
    z_grid = torch.tensor(zz.flatten(), dtype=torch.float32).to(DEVICE).view(-1, 1)
    t_grid = torch.tensor(tt.flatten(), dtype=torch.float32).to(DEVICE).view(-1, 1)

    # Predict the fields and keep the Ex component.
    pred_all = model_phys(x_grid, y_grid, z_grid, t_grid)
    Ex_pred_flat = pred_all[:, 0]
    Ex_pred_grid = Ex_pred_flat.cpu().numpy().reshape(resolution, resolution)

    # Get geometry (SDF) for this slice.
    coords_viz = torch.cat([x_grid, y_grid, z_grid], dim=1)
    # NOTE(review): this again overwrites the stored min/max (see above).
    coords_viz_norm = model_geo.normalize_coords(coords_viz)
    sdf_grid = model_geo(coords_viz_norm).cpu().numpy().reshape(resolution, resolution)

    # Zero out values inside the geometry for clearer visualization.
    Ex_pred_grid[sdf_grid < 0.0] = 0.0

plt.figure(figsize=(8, 6))
cf = plt.contourf(xx, zz, Ex_pred_grid, levels=50, cmap='RdBu_r', vmin=-1.0, vmax=1.0)

# Geometry outline at the SDF zero level set.
plt.contour(xx, zz, sdf_grid, levels=[0], colors='black', linewidths=3)

# Fixed: the original '\tilde' contained an unescaped \t (a TAB character);
# escape the backslash so the LaTeX command survives.
plt.title(f'Predicted $\\tilde{{E}}_x$ field at $t={T_VISUALIZATION:.2f}$ (Example Usage)')
plt.xlabel('x (m)')
plt.ylabel('z (m)')
plt.colorbar(cf, label='E_x field value')
plt.axis('equal')
plt.tight_layout()
plt.show()

print("Example usage script finished successfully.")
hubconf.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from huggingface_hub import hf_hub_download
2
+ import torch
3
+ from pinn_electromagnetics.models import GeoNetHash, MaxwellPINN
4
+
def geonet(pretrained=False, map_location="cpu"):
    """Build a GeoNetHash geometry network for torch.hub.

    Args:
        pretrained: when True, fetch the released checkpoint from the
            Ayda138000/DualMaxwell repo on the Hugging Face Hub and load it.
        map_location: device spec forwarded to ``torch.load``.

    Returns:
        A ``GeoNetHash`` instance (randomly initialized unless ``pretrained``).
    """
    net = GeoNetHash()
    if not pretrained:
        return net
    ckpt_path = hf_hub_download(repo_id="Ayda138000/DualMaxwell", filename="geonet_real_v34.pth")
    net.load_state_dict(torch.load(ckpt_path, map_location=map_location))
    return net
11
+
def maxwell(pretrained=False, map_location="cpu"):
    """Build a MaxwellPINN field network for torch.hub.

    Args:
        pretrained: when True, fetch the released checkpoint from the
            Ayda138000/DualMaxwell repo on the Hugging Face Hub and load it.
        map_location: device spec forwarded to ``torch.load``.

    Returns:
        A ``MaxwellPINN`` instance (randomly initialized unless ``pretrained``).
    """
    net = MaxwellPINN()
    if not pretrained:
        return net
    ckpt_path = hf_hub_download(repo_id="Ayda138000/DualMaxwell", filename="physnet_v34_real.pth")
    net.load_state_dict(torch.load(ckpt_path, map_location=map_location))
    return net
losses.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import torch
3
+ import numpy as np # Used for constants like pi if needed elsewhere
4
+
5
+ # --- Derivative Computation (from v28) ---
6
def compute_all_derivatives(model_phys, x, y, z, t):
    """Evaluate the field network and return all first-order partials.

    Args:
        model_phys: callable mapping (x, y, z, t) column tensors to a
            (N, 6) tensor ordered (Ex, Ey, Ez, Bx, By, Bz).
        x, y, z, t: (N, 1) tensors with ``requires_grad=True``.

    Returns:
        (outputs, derivs): the raw (N, 6) network output and a dict keyed
        'd{Ex..Bz}_d{x,y,z,t}' of (N, 1) partial derivatives. Inputs a
        component does not depend on get an all-zero gradient.
    """
    outputs = model_phys(x, y, z, t)
    inputs = [x, y, z, t]

    def _partials(component):
        # Summing yields a scalar, so one backward pass gives per-sample grads.
        grads = torch.autograd.grad(component.sum(), inputs,
                                    create_graph=True, allow_unused=True)
        # allow_unused=True returns None for inputs the component ignores;
        # substitute explicit zeros so downstream arithmetic always works.
        return [g if g is not None else torch.zeros_like(x) for g in grads]

    derivs = {}
    for col, comp in enumerate(('Ex', 'Ey', 'Ez', 'Bx', 'By', 'Bz')):
        for axis, grad in zip(('x', 'y', 'z', 't'), _partials(outputs[:, col:col + 1])):
            derivs[f'd{comp}_d{axis}'] = grad
    return outputs, derivs
32
+
33
+ # --- Maxwell Loss (from v28) ---
34
def compute_maxwell_loss(derivs):
    """Mean squared residual of the source-free Maxwell equations.

    Args:
        derivs: dict from :func:`compute_all_derivatives`, keyed
            'd{Ex..Bz}_d{x,y,z,t}', each value a (N, 1) tensor.

    Returns:
        Scalar tensor: mean over samples of the summed squared residuals of
        all four equations in dimensionless units (c = 1, no charges/currents).
    """
    d = derivs

    # Gauss's laws: div E = 0 and div B = 0.
    div_E = d['dEx_dx'] + d['dEy_dy'] + d['dEz_dz']
    div_B = d['dBx_dx'] + d['dBy_dy'] + d['dBz_dz']

    # Faraday's law: curl E + dB/dt = 0, componentwise.
    faraday = (
        (d['dEz_dy'] - d['dEy_dz'] + d['dBx_dt']) ** 2
        + (d['dEx_dz'] - d['dEz_dx'] + d['dBy_dt']) ** 2
        + (d['dEy_dx'] - d['dEx_dy'] + d['dBz_dt']) ** 2
    )

    # Ampere-Maxwell law with J = 0: curl B - dE/dt = 0, componentwise.
    ampere = (
        (d['dBz_dy'] - d['dBy_dz'] - d['dEx_dt']) ** 2
        + (d['dBx_dz'] - d['dBz_dx'] - d['dEy_dt']) ** 2
        + (d['dBy_dx'] - d['dBx_dy'] - d['dEz_dt']) ** 2
    )

    return torch.mean(div_E ** 2 + div_B ** 2 + faraday + ampere)
56
+
57
+ # --- Data Loss (from v31.2, adapted for NPZ data) ---
58
def compute_data_loss(model_phys,
                      coords_tensor, E0_tensor, B0_tensor,
                      source_pos_tensor, source_orientation_tensor,
                      source_signal_tensor, t_tensor,
                      n_samples_ic=1024, n_samples_source=128):
    """Data-driven loss: initial conditions plus antenna source boundary.

    Args:
        model_phys: callable mapping (x, y, z, t) columns to (N, 6) fields.
        coords_tensor: (N, 3) spatial points.
        E0_tensor, B0_tensor: (N, 3) initial fields at ``coords_tensor``.
        source_pos_tensor: (3,) antenna location.
        source_orientation_tensor: (3,) unit direction of the source E-field.
        source_signal_tensor: (T, 1) source amplitude over time.
        t_tensor: (T, 1) time points matching the signal.
        n_samples_ic: number of random points used for the t=0 term.
        n_samples_source: currently ignored — every time point of the source
            signal is used (see note below).

    Returns:
        Scalar tensor: sum of the four MSE terms.
    """
    # --- 1. Initial conditions (t = 0): fields must match E0 / B0 ---
    device = coords_tensor.device
    pick = torch.randperm(coords_tensor.shape[0], device=device)[:n_samples_ic]
    x0 = coords_tensor[pick, 0:1]
    y0 = coords_tensor[pick, 1:2]
    z0 = coords_tensor[pick, 2:3]
    pred0 = model_phys(x0, y0, z0, torch.zeros_like(x0))

    total = 0.0
    total += torch.mean((pred0[:, 0:3] - E0_tensor[pick]) ** 2)
    total += torch.mean((pred0[:, 3:6] - B0_tensor[pick]) ** 2)

    # --- 2. Source boundary: time-varying E at the antenna, B = 0 there ---
    # NOTE(review): all T time points are used; n_samples_source is unused.
    n_times = t_tensor.shape[0]
    # Broadcast the fixed antenna position across every time sample.
    x_src = source_pos_tensor[0:1].expand(n_times, -1)
    y_src = source_pos_tensor[1:2].expand(n_times, -1)
    z_src = source_pos_tensor[2:3].expand(n_times, -1)

    pred_src = model_phys(x_src, y_src, z_src, t_tensor)

    # Target E at the source: signal amplitude along the fixed orientation.
    target_E = source_signal_tensor * source_orientation_tensor.view(1, 3)

    total += torch.mean((pred_src[:, 0:3] - target_E) ** 2)
    total += torch.mean(pred_src[:, 3:6] ** 2)  # B vanishes at the source

    return total
models.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import torch
3
+ import torch.nn as nn
4
+ import numpy as np
5
+
6
+ # --- Hash Grid Encoder (from v26) ---
7
class HashGridEncoder(nn.Module):
    """Multi-resolution hash-grid encoder (Instant-NGP style) for 3D coords.

    Each level ``l`` has a virtual dense grid of resolution
    ``base_resolution * per_level_scale**l``; grid corners are hashed into a
    table of ``2**log2_hashmap_size`` learned feature vectors, and a query
    point's feature is the trilinear interpolation of its 8 cell corners.
    The forward output concatenates all levels:
    shape (N, n_levels * n_features_per_level).
    """

    def __init__(self, n_levels=16, n_features_per_level=2,
                 log2_hashmap_size=19, base_resolution=16,
                 per_level_scale=1.5):
        super(HashGridEncoder, self).__init__()
        self.n_levels = n_levels
        self.n_features_per_level = n_features_per_level
        self.log2_hashmap_size = log2_hashmap_size
        self.base_resolution = base_resolution
        self.per_level_scale = per_level_scale
        self.hashmap_size = 2**self.log2_hashmap_size
        # One independent hash table (embedding) per resolution level.
        self.embeddings = nn.ModuleList([
            nn.Embedding(self.hashmap_size, self.n_features_per_level)
            for i in range(self.n_levels)
        ])
        # Small uniform init so early features start near zero.
        for emb in self.embeddings: nn.init.uniform_(emb.weight, -1e-4, 1e-4)

    def hash_fn(self, coords_int, level):
        # Spatial hash: dot the integer corner coords with large primes and
        # fold into the table size. `level` is currently unused — every level
        # shares the same hash function over its own separate table.
        primes = torch.tensor([1, 2654435761, 805459861], dtype=torch.int64, device=coords_int.device)
        hashed_indices = (coords_int * primes).sum(dim=-1) % self.hashmap_size
        return hashed_indices

    def trilinear_interp(self, x, v000, v001, v010, v011, v100, v101, v110, v111):
        # x: fractional position inside the cell; corner names are (x, y, z)
        # bits, e.g. v101 is the corner at x=1, y=0, z=1.
        # Interpolate along x first, then y, then z.
        w0 = (1 - x[..., 0:1]); w1 = x[..., 0:1]
        c00 = v000 * w0 + v100 * w1; c01 = v001 * w0 + v101 * w1
        c10 = v010 * w0 + v110 * w1; c11 = v011 * w0 + v111 * w1
        c0 = c00 * (1 - x[..., 1:2]) + c10 * x[..., 1:2]
        c1 = c01 * (1 - x[..., 1:2]) + c11 * x[..., 1:2]
        c = c0 * (1 - x[..., 2:3]) + c1 * x[..., 2:3]
        return c

    def forward(self, x_coords): # x_coords: [N, 3] in [0, 1]
        """Encode normalized coordinates; returns (N, n_levels * n_features)."""
        all_features = []
        for l in range(self.n_levels):
            # Resolution of this level's virtual grid.
            res = int(self.base_resolution * (self.per_level_scale ** l))
            x_scaled = x_coords * res
            x_floor = torch.floor(x_scaled).to(torch.int64)
            x_local = (x_scaled - x_floor)  # fractional offset inside the cell
            # The 8 corner offsets, ordered to match the v000..v111 argument
            # order of trilinear_interp above.
            corners = torch.tensor([[0,0,0],[0,0,1],[0,1,0],[0,1,1],[1,0,0],[1,0,1],[1,1,0],[1,1,1]], dtype=torch.int64, device=x_coords.device)
            cell_corners_int = x_floor.unsqueeze(1) + corners.unsqueeze(0)  # (N, 8, 3)
            hashed_indices = self.hash_fn(cell_corners_int, l)
            embedded_features = self.embeddings[l](hashed_indices)  # (N, 8, F)
            interp_features = self.trilinear_interp(
                x_local,
                embedded_features[:, 0], embedded_features[:, 1],
                embedded_features[:, 2], embedded_features[:, 3],
                embedded_features[:, 4], embedded_features[:, 5],
                embedded_features[:, 6], embedded_features[:, 7]
            )
            all_features.append(interp_features)
        return torch.cat(all_features, dim=-1)
58
+
59
+ # --- GeoNetHash (for geometry, from v26) ---
60
class GeoNetHash(nn.Module):
    """Geometry network: hash-grid features -> small MLP -> signed distance."""

    def __init__(self, box_size_scale=1.0):
        super(GeoNetHash, self).__init__()
        self.box_size_scale = box_size_scale
        self.encoder = HashGridEncoder(n_levels=16, n_features_per_level=2)
        feat_dim = self.encoder.n_levels * self.encoder.n_features_per_level
        self.mlp = nn.Sequential(
            nn.Linear(feat_dim, 64),
            nn.ReLU(),
            nn.Linear(64, 1)
        )
        # Per-axis normalization statistics; populated by normalize_coords().
        self.min = None
        self.max = None

    def normalize_coords(self, coords):
        """Record per-axis min/max of *coords* and map them into [0, 1].

        NOTE(review): statistics are recomputed on every call, so later calls
        with a subset shift the normalization — confirm callers always derive
        them from the full dataset first.
        """
        self.min = coords.min(dim=0, keepdim=True)[0]
        self.max = coords.max(dim=0, keepdim=True)[0]
        # Nudge degenerate axes so the division below never hits zero.
        self.max[self.max == self.min] += 1e-6
        return (coords - self.min) / (self.max - self.min)

    def forward(self, coords_input_normalized):
        """Predict the SDF for coordinates already normalized to [0, 1]."""
        return self.mlp(self.encoder(coords_input_normalized))
84
+
85
+ # --- Positional Encoder 4D (for MaxwellPINN, from v19/v22) ---
86
class PositionalEncoder4D(nn.Module):
    """NeRF-style sinusoidal positional encoding for (x, y, z, t) inputs.

    Maps each input dimension to itself plus sin/cos at frequencies
    2^0 .. 2^(num_freqs-1), giving input_dims * (2 * num_freqs + 1) outputs.
    """

    def __init__(self, input_dims, num_freqs):
        super(PositionalEncoder4D, self).__init__()
        self.input_dims = input_dims
        self.num_freqs = num_freqs
        # Geometric frequency ladder: 1, 2, 4, ..., 2^(num_freqs-1).
        self.freq_bands = 2.0 ** torch.linspace(0.0, num_freqs - 1, num_freqs)
        # Identity term + one (sin, cos) pair per frequency per dimension.
        self.output_dims = self.input_dims * (2 * self.num_freqs + 1)

    def forward(self, x):  # x is [N, input_dims]
        """Return [x, sin(x * f), cos(x * f)] flattened to (N, output_dims)."""
        scaled = x.unsqueeze(-1) * self.freq_bands.to(x.device)  # (N, D, F)
        pieces = [x,
                  torch.sin(scaled).view(x.shape[0], -1),
                  torch.cos(scaled).view(x.shape[0], -1)]
        return torch.cat(pieces, dim=1)
100
+
101
+ # --- MaxwellPINN (for electromagnetics, from v28) ---
102
class MaxwellPINN(nn.Module):
    """Physics-informed field network: (x, y, z, t) -> (Ex, Ey, Ez, Bx, By, Bz).

    Inputs are positionally encoded, then passed through a 3-hidden-layer
    Tanh MLP that emits the six field components.
    """

    def __init__(self, num_freqs=8, hidden_dim=128):
        super(MaxwellPINN, self).__init__()
        self.encoder = PositionalEncoder4D(input_dims=4, num_freqs=num_freqs)
        in_dim = self.encoder.output_dims
        layers = [
            nn.Linear(in_dim, hidden_dim), nn.Tanh(),
            nn.Linear(hidden_dim, hidden_dim), nn.Tanh(),
            nn.Linear(hidden_dim, hidden_dim), nn.Tanh(),
            nn.Linear(hidden_dim, 6),  # (Ex, Ey, Ez, Bx, By, Bz)
        ]
        self.network = nn.Sequential(*layers)

    def forward(self, x, y, z, t):
        # Each argument is an (N, 1) column; stack into (N, 4) before encoding.
        stacked = torch.cat([x, y, z, t], dim=1)
        return self.network(self.encoder(stacked))
setup.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Packaging metadata for pinn_electromagnetics."""

from pathlib import Path

from setuptools import setup, find_packages

# Read the long description with an explicit encoding; the bare
# open('README.md').read() relied on the platform default encoding and
# never closed the file handle.
long_description = Path(__file__).with_name('README.md').read_text(encoding='utf-8')

setup(
    name='pinn_electromagnetics',
    version='0.1.0',
    packages=find_packages(),
    install_requires=[
        'torch>=1.9.0',
        'numpy>=1.20.0',
        'matplotlib>=3.3.0',
        'scikit-learn>=0.24.0',  # If StandardScaler is used in example_usage or other scripts
    ],
    author='PINN Electromagnetics Team',
    description='A Python package for simulating electromagnetic fields using Physics-Informed Neural Networks.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/yourusername/pinn_electromagnetics',  # Replace with your repo URL
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Scientific/Engineering :: Physics',
    ],
    python_requires='>=3.7',
)