|
|
|
|
| """
|
| @author: Mohamed Elrefaie, mohamed.elrefaie@mit.edu mohamed.elrefaie@tum.de
|
|
|
| This module is part of the research presented in the paper:
|
| "DrivAerNet++: A Large-Scale Multimodal Car Dataset with Computational Fluid Dynamics Simulations and Deep Learning Benchmarks".
|
|
|
| This module is used to define both point-cloud based and graph-based models, including RegDGCNN, PointNet, and several Graph Neural Network (GNN) models
|
| for the task of surrogate modeling of the aerodynamic drag.
|
| """
|
| import torch
|
| import torch.nn as nn
|
| from typing import Tuple
|
|
|
# Default compute device. Fall back to CPU when CUDA is unavailable so that
# importing this module does not assume a GPU machine (matches the fallback
# already used inside get_graph_feature).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
|
|
def intermediate(x, xx):
    """Partial term of the negative squared pairwise distance matrix.

    Computes ``2 * x^T x - xx`` (broadcast over rows); the caller subtracts
    the transposed squared-norm term to complete
    ``-||xi - xj||^2 = 2 xi.xj - ||xi||^2 - ||xj||^2``.

    Args:
        x: Point features of shape (batch, dims, num_points).
        xx: Per-point squared norms of shape (batch, 1, num_points).

    Returns:
        Tensor of shape (batch, num_points, num_points).
    """
    cross = torch.matmul(x.transpose(2, 1), x)
    # Free cached GPU memory between the large (N x N) temporaries; no-op on CPU.
    torch.cuda.empty_cache()
    return 2 * cross - xx
|
|
|
| def knn(x, k):
|
| x = x.to(torch.float16)
|
| xx = torch.sum(x**2, dim=1, keepdim=True)
|
| pairwise_distance = intermediate(x, xx) - xx.transpose(2, 1)
|
| torch.cuda.empty_cache()
|
| idx = pairwise_distance.topk(k=k, dim=-1)[1]
|
| return idx
|
|
|
|
|
def get_graph_feature(x, k=20, idx=None, dim9=False):
    """Build DGCNN edge features: for every point, concatenate
    (neighbour - point, point) over its k nearest neighbours.

    Args:
        x: Input features of shape (batch, dims, num_points).
        k: Number of neighbours per point.
        idx: Optional precomputed neighbour indices of shape
            (batch, num_points, k); computed with `knn` when None.
        dim9: When True, neighbours are found using only channels 6: of x
            (presumably xyz coordinates appended after other features —
            TODO confirm channel layout with callers).

    Returns:
        Tensor of shape (batch, 2 * dims, num_points, k).
    """
    batch_size = x.size(0)
    num_points = x.size(2)
    x = x.view(batch_size, -1, num_points)
    if idx is None:
        if not dim9:
            idx = knn(x, k=k)
        else:
            idx = knn(x[:, 6:], k=k)
    torch.cuda.empty_cache()

    # Offset each batch's indices so they address the flattened
    # (batch*num_points, dims) view below.  Use the index tensor's own device:
    # the original derived the device from torch.cuda.is_available(), which
    # made `idx + idx_base` fail for CPU tensors on a CUDA-capable machine.
    idx_base = torch.arange(0, batch_size, device=idx.device).view(-1, 1, 1) * num_points
    idx = idx + idx_base
    idx = idx.view(-1)

    _, num_dims, _ = x.size()

    # Gather each point's k neighbour feature vectors.
    x = x.transpose(2, 1).contiguous()
    feature = x.view(batch_size * num_points, -1)[idx, :]
    feature = feature.view(batch_size, num_points, k, num_dims)
    x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)

    # Edge feature = (relative offset to neighbour, the point itself),
    # rearranged to channels-first for the Conv2d stages.
    feature = torch.cat((feature - x, x), dim=3).permute(0, 3, 1, 2).contiguous()

    torch.cuda.empty_cache()
    return feature
|
|
|
class regDGCNN_seg(nn.Module):
    """
    Graph-based segmentation network using Dynamic Graph CNN (DGCNN) blocks.
    Processes point cloud inputs and produces per-point predictions.

    Four stacked edge-conv stages extract multi-scale local features, a
    globally max-pooled embedding is broadcast back to every point, and a
    1x1-conv head maps the concatenated features to per-point outputs.

    Args:
        output_size (int): Number of output channels per point (e.g., classes or regression dims).
        input_dims (int): Dimensionality of input point features.
        k (int): Number of nearest neighbors for graph construction.
        emb_dims (int): Embedding dimensionality in the bottleneck layer.
            NOTE(review): must be divisible by 16 to satisfy the GroupNorm
            group count used below — confirm for non-default values.
        dropout (float): Dropout probability for final classifier.
    """

    def __init__(
        self,
        output_size: int = 3,
        input_dims: int = 6,
        k: int = 20,
        emb_dims: int = 1024,
        dropout: float = 0.5
    ):
        super().__init__()
        self.output_size = output_size
        self.input_dims = input_dims
        self.k = k
        self.emb_dims = emb_dims
        self.dropout = dropout

        # GroupNorm rather than BatchNorm: normalization is independent of the
        # batch size.  One pair of 64-channel norms per edge-conv stage.
        self.bn1a = nn.GroupNorm(num_groups=8, num_channels=64)
        self.bn1b = nn.GroupNorm(num_groups=8, num_channels=64)
        self.bn2a = nn.GroupNorm(num_groups=8, num_channels=64)
        self.bn2b = nn.GroupNorm(num_groups=8, num_channels=64)
        self.bn3a = nn.GroupNorm(num_groups=8, num_channels=64)
        self.bn3b = nn.GroupNorm(num_groups=8, num_channels=64)
        self.bn4a = nn.GroupNorm(num_groups=8, num_channels=64)
        self.bn4b = nn.GroupNorm(num_groups=8, num_channels=64)

        # Norms for the global embedding and the per-point decoder head.
        self.bn5 = nn.GroupNorm(num_groups=16, num_channels=emb_dims)
        self.bn6 = nn.GroupNorm(num_groups=32, num_channels=1024)
        self.bn7 = nn.GroupNorm(num_groups=16, num_channels=512)
        self.bn8 = nn.GroupNorm(num_groups=16, num_channels=256)

        # Edge-conv stage 1: input edge features are (neighbour - point, point),
        # hence the doubled input channel count (input_dims * 2).
        self.conv1a = nn.Sequential(
            nn.Conv2d(input_dims * 2, 64, kernel_size=1, bias=False),
            self.bn1a,
            nn.LeakyReLU(0.2)
        )
        self.conv1b = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=1, bias=False),
            self.bn1b,
            nn.LeakyReLU(0.2)
        )

        # Edge-conv stage 2 (operates on stage-1 output, again doubled).
        self.conv2a = nn.Sequential(
            nn.Conv2d(64 * 2, 64, kernel_size=1, bias=False),
            self.bn2a,
            nn.LeakyReLU(0.2)
        )
        self.conv2b = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=1, bias=False),
            self.bn2b,
            nn.LeakyReLU(0.2)
        )

        # Edge-conv stage 3.
        self.conv3a = nn.Sequential(
            nn.Conv2d(64 * 2, 64, kernel_size=1, bias=False),
            self.bn3a,
            nn.LeakyReLU(0.2)
        )
        self.conv3b = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=1, bias=False),
            self.bn3b,
            nn.LeakyReLU(0.2)
        )

        # Edge-conv stage 4.
        self.conv4a = nn.Sequential(
            nn.Conv2d(64 * 2, 64, kernel_size=1, bias=False),
            self.bn4a,
            nn.LeakyReLU(0.2)
        )
        self.conv4b = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=1, bias=False),
            self.bn4b,
            nn.LeakyReLU(0.2)
        )

        # Bottleneck: concatenated multi-scale features (64 * 4) -> emb_dims.
        self.conv5 = nn.Sequential(
            nn.Conv1d(64 * 4, emb_dims, kernel_size=1, bias=False),
            self.bn5,
            nn.LeakyReLU(0.2)
        )

        # Decoder head: broadcast global embedding + local features -> outputs.
        self.conv6 = nn.Sequential(
            nn.Conv1d(emb_dims + 64 * 4, 1024, kernel_size=1, bias=False),
            self.bn6,
            nn.LeakyReLU(0.2)
        )
        self.conv7 = nn.Sequential(
            nn.Conv1d(1024, 512, kernel_size=1, bias=False),
            self.bn7,
            nn.LeakyReLU(0.2)
        )
        self.conv8 = nn.Sequential(
            nn.Conv1d(512, 256, kernel_size=1, bias=False),
            self.bn8,
            nn.LeakyReLU(0.2)
        )
        self.dp1 = nn.Dropout(self.dropout)
        # Final per-point projection to the requested number of outputs.
        self.conv9 = nn.Conv1d(256, output_size, kernel_size=1, bias=False)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """
        Forward pass through the network.

        Args:
            inputs: Raw point cloud tensor of shape [B, input_dims, N].

        Returns:
            Per-point predictions of shape [B, output_size, N] or [N, output_size] if B==1.
        """
        B, _, N = inputs.size()

        # Four cascaded edge-conv stages; each rebuilds the kNN graph in the
        # current feature space (the "dynamic graph" in DGCNN).
        x1 = self._dgcnn_block(inputs, k=self.k, convs=[self.conv1a, self.conv1b])

        x2 = self._dgcnn_block(x1, k=self.k, convs=[self.conv2a, self.conv2b])

        x3 = self._dgcnn_block(x2, k=self.k, convs=[self.conv3a, self.conv3b])

        x4 = self._dgcnn_block(x3, k=self.k, convs=[self.conv4a, self.conv4b])

        # Multi-scale local features: [B, 64 * 4, N].
        local_feats = torch.cat([x1, x2, x3, x4], dim=1)

        # Global descriptor: max over all points, then broadcast to each point.
        emb = self.conv5(local_feats)
        emb = torch.max(emb, -1, keepdim=True)[0]
        emb = emb.repeat(1, 1, N)

        # Combine the global context with the per-point local features.
        feats = torch.cat([emb, local_feats], dim=1)

        # Per-point decoder: three 1x1-conv stages, dropout, final projection.
        x = self.conv6(feats)
        x = self.conv7(x)
        x = self.conv8(x)
        x = self.dp1(x)
        x = self.conv9(x)

        # Single-sample convenience: drop the batch dim and transpose so the
        # caller receives [N, output_size] instead of [1, output_size, N].
        if B == 1:
            x = x.squeeze(0).permute(1, 0)
        return x

    def _dgcnn_block(
        self,
        x: torch.Tensor,
        k: int,
        convs: Tuple[nn.Sequential, nn.Sequential]
    ) -> torch.Tensor:
        """
        Helper for two-layer DGCNN block: feature extraction, neighbor aggregation, and max pooling.

        Builds (neighbour - point, point) edge features with `get_graph_feature`,
        applies both conv stages, then max-pools over the k neighbours.
        """
        x = get_graph_feature(x, k=k)

        x = convs[0](x)

        x = convs[1](x)

        # Aggregate over the neighbour axis (last dim) -> [B, C, N].
        return x.max(dim=-1, keepdim=False)[0]

    def _single_dgcnn(
        self,
        x: torch.Tensor,
        k: int,
        conv: nn.Sequential
    ) -> torch.Tensor:
        """
        Single-layer DGCNN block: conv + max pooling over k neighbors.

        NOTE(review): not called by forward() in this file; appears to be kept
        for single-conv variants of the architecture.
        """
        x = get_graph_feature(x, k=k)
        x = conv(x)
        return x.max(dim=-1, keepdim=False)[0]
|
|
|
|
|
|
|