# SachinSaud's picture
# Upload folder using huggingface_hub
# 6ac44e1 verified
import torch
from torch import nn as nn
from models import normalization
from utilities import common
from models import regDGCNN_seg
# Prefer the GPU when one is available, but fall back to CPU so the module
# can also be imported and run on machines without CUDA (the original
# hard-coded 'cuda', which fails at first use on CPU-only hosts).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Model(nn.Module):
    """Wrapper around the regDGCNN_seg point-cloud regression network.

    Assembles a per-node feature tensor from a batch dictionary, runs the
    core network, and (outside training) de-normalizes the predicted
    per-node fatigue life via the output normalizer.
    """

    def __init__(self, params, core_model_name="regDGCNN_seg"):
        """
        Args:
            params (dict): hyper-parameters; must contain 'k', 'model',
                'output_size' and 'input_size'.
            core_model_name (str): which core network to build; only
                'regDGCNN_seg' is supported.

        Raises:
            ValueError: if ``core_model_name`` is not supported.
        """
        super(Model, self).__init__()
        self._params = params
        # Maps the network's normalized output back to physical fatigue life.
        self._output_life_normalizer = normalization.Normalizer(size=1, name='output_pos_normalizer')
        self.k = params['k']  # number of nearest neighbours for graph construction
        self._model_type = params['model'].__name__
        self._displacement_base = None
        self.core_model_name = core_model_name
        if core_model_name == 'regDGCNN_seg':
            self.core_model = regDGCNN_seg
            # BUG FIX: the original line ended with a stray comma, which made
            # this attribute the tuple (False,) — a *truthy* value. It must be
            # the plain boolean False.
            self.is_multigraph = False
            # Instantiate the core network; it is invoked later via
            # self.learned_model in forward().
            self.learned_model = regDGCNN_seg.regDGCNN_seg(
                output_size=params['output_size'],
                input_dims=params['input_size'],
                k=self.k,
                emb_dims=1024,
                dropout=0.1,
            )
        else:
            raise ValueError(f"Unsupported core model: {self.core_model_name}")

    def forward(self, inputs, is_training):
        """Run the core model on the processed inputs.

        During training the raw (normalized) per-node output is returned;
        otherwise the output is de-normalized via _update().
        """
        features = self.process_inputs(inputs, device)
        if is_training:
            return self.learned_model(features)
        return self._update(self.learned_model(features))

    def _update(self, per_node_network_output):
        """De-normalize the per-node network output into fatigue life."""
        fatigue_pred = self._output_life_normalizer.inverse(per_node_network_output[:, :])
        return fatigue_pred

    def process_inputs(self, inputs, device):
        """Assemble the channels-first feature tensor expected by the core model.

        Args:
            inputs (dict): per-node tensors keyed by 'mesh_pos' (N, 3) and
                'radial_distance', 'r_norm', 'z_norm', 'd_step',
                'd_step_norm', 'node_type' — each assumed (N, 1);
                TODO confirm against the dataset loader.
            device (torch.device): ignored — the device of the model's own
                parameters is used instead (parameter kept for interface
                compatibility).

        Returns:
            torch.Tensor: features of shape (1, F, N) — batch of 1, F
            feature channels, N nodes. With the shapes above F = 9
            (3 position columns + 6 scalar features; an earlier comment
            claimed 8, which under-counted).
        """
        # Always follow where the model's parameters actually live, not the
        # caller-supplied device argument.
        device = next(self.parameters()).device
        mesh_pos = inputs['mesh_pos'].to(device)                # (N, 3)
        node_type = inputs['node_type'].to(device)              # (N, 1)
        radial_distance = inputs['radial_distance'].to(device)  # (N, 1) — TODO confirm width
        r_norm = inputs['r_norm'].to(device)                    # (N, 1)
        z_norm = inputs['z_norm'].to(device)                    # (N, 1)
        d_step = inputs['d_step'].to(device)                    # (N, 1)
        d_step_norm = inputs['d_step_norm'].to(device)          # (N, 1)
        x = torch.cat([
            mesh_pos,
            radial_distance,
            r_norm,
            z_norm,
            d_step,
            d_step_norm,
            node_type,
        ], dim=1)  # (N, F)
        # (N, F) -> (F, N) -> (1, F, N): DGCNN-style nets take channels-first
        # input with a leading batch dimension.
        return x.T.unsqueeze(0)

    def get_output_life_normalizer(self):
        """Return the normalizer used to de-normalize predicted fatigue life."""
        return self._output_life_normalizer

    def save_model(self, path):
        """Persist the core network and output normalizer as '<path>_*.pth'."""
        torch.save(self.learned_model, path + "_learned_model.pth")
        torch.save(self._output_life_normalizer, path + "_output_life_normalizer.pth")

    def load_model(self, path):
        """Load the core network and normalizer saved by save_model().

        NOTE(review): torch.load un-pickles arbitrary objects — only load
        checkpoints from a trusted source.
        """
        self.learned_model = torch.load(path + "_learned_model.pth")
        self._output_life_normalizer = torch.load(path + "_output_life_normalizer.pth")

    def evaluate(self):
        """Put both the wrapper and the core model into eval mode."""
        self.eval()
        self.learned_model.eval()