# Source snapshot: fdb3169 (2,776 bytes) — extraction artifact preserved as a comment.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
Geometry Optimizer Module
Gradient-based optimization for geometric constraints.
"""

import torch
from typing import Dict, List, Callable


class GeometryOptimizer:
    """
    Optimizer for geometric constraints using gradient descent.

    Following the GeoSDF paper methodology: AdamW with a cosine-annealed
    learning rate, gradient-norm clipping, and a loss-plateau convergence
    criterion.
    """

    def __init__(self, learning_rate: float = 0.1, max_iterations: int = 500,
                 convergence_threshold: float = 1e-6):
        # learning_rate: initial AdamW step size (annealed down to 1e-4).
        # max_iterations: hard cap on optimization steps.
        # convergence_threshold: stop once |loss_t - loss_{t-1}| drops below this.
        self.learning_rate = learning_rate
        self.max_iterations = max_iterations
        self.convergence_threshold = convergence_threshold

    def optimize(self, sdf, constraints: List[Callable],
                 weights: List[float], verbose: bool = False) -> Dict:
        """
        Optimize SDF parameters to satisfy constraints.

        Args:
            sdf: SDF primitive exposing ``parameters()`` (nn.Module-like)
                with learnable parameters.
            constraints: List of zero-argument functions, each returning a
                scalar loss tensor connected to the SDF's parameters.
            weights: Weight for each constraint (paired positionally).
            verbose: Print optimization progress every 100 iterations.

        Returns:
            Dict with keys 'final_loss' (float), 'converged' (bool) and
            'iterations' (int, number of steps actually executed).
        """
        # Only trainable parameters participate; nothing to optimize without them.
        params = [p for p in sdf.parameters() if p.requires_grad]
        # BUGFIX: an empty constraint list previously produced a loss tensor
        # with no grad_fn, so .backward() raised RuntimeError. Treat "no
        # constraints" the same as "no parameters": trivially converged.
        if not params or not constraints:
            return {'final_loss': 0.0, 'converged': True, 'iterations': 0}

        optimizer = torch.optim.AdamW(params, lr=self.learning_rate)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=self.max_iterations, eta_min=1e-4
        )

        prev_loss = float('inf')
        # BUGFIX: pre-initialize so max_iterations <= 0 no longer raises
        # NameError on 'current_loss' / 'i' in the return statement below.
        current_loss = 0.0
        iterations = 0
        converged = False

        for i in range(self.max_iterations):
            optimizer.zero_grad()

            # Weighted sum of constraint losses. BUGFIX: accumulate on the
            # parameters' device/dtype instead of a fresh CPU float32 scalar,
            # which broke (or silently moved data) for CUDA/float64 params.
            total_loss = params[0].new_zeros(())
            for constraint, weight in zip(constraints, weights):
                total_loss = total_loss + weight * constraint()

            # Backward pass
            total_loss.backward()

            # Gradient clipping for stability (per the GeoSDF setup).
            torch.nn.utils.clip_grad_norm_(params, max_norm=1.0)

            optimizer.step()
            scheduler.step()

            # Check convergence: stop when the loss change plateaus.
            iterations = i + 1
            current_loss = total_loss.item()
            if abs(prev_loss - current_loss) < self.convergence_threshold:
                converged = True
                break
            prev_loss = current_loss

            if verbose and i % 100 == 0:
                print(f"  Iteration {i}: loss = {current_loss:.6f}")

        return {
            'final_loss': current_loss,
            'converged': converged,
            'iterations': iterations
        }