# coding: utf-8
__author__ = 'PyTorch Backend Implementation'

import os
import pickle
import numpy as np
import torch
import torch.nn as nn
from typing import Dict, Tuple, Optional, Any
import warnings
import hashlib
import time

# Suppress channels_last warnings for 3D audio tensors
warnings.filterwarnings("ignore", message=".*channels_last.*")
warnings.filterwarnings("ignore", message=".*rank 3.*")


class PyTorchBackend:
    """
    Optimized PyTorch backend for model inference.
    Wraps a model with device placement, memory-format, mixed-precision,
    torch.compile, and TorchScript options for faster inference.
    """
    
    def __init__(self, device='cuda:0', optimize_mode='channels_last'):
        """
        Initialize the optimized PyTorch backend.
        
        Parameters:
        ----------
        device : str
            Device to use for inference (cuda:0, cpu, mps, etc.)
        optimize_mode : str
            Optimization mode: 'channels_last' (recommended), 'compile', 'jit', or 'default'
        """
        self.device = device
        self.optimize_mode = optimize_mode
        self.model = None
        self.compiled_model = None
        # Default AMP flag; optimize_model() overrides it. Set here so __call__
        # also works for models loaded via load_optimized_model().
        self.use_amp = True
        
        # Check device availability
        if device.startswith('cuda') and not torch.cuda.is_available():
            warnings.warn("CUDA not available, falling back to CPU")
            self.device = 'cpu'
        elif device == 'mps' and not torch.backends.mps.is_available():
            warnings.warn("MPS not available, falling back to CPU")
            self.device = 'cpu'
        
        # Apply ultra optimization settings
        self._apply_ultra_optimizations()
    
    def _apply_ultra_optimizations(self):
        """Apply ultra-speed optimizations globally."""
        if self.device.startswith('cuda'):
            # Enable all CUDA optimizations
            torch.backends.cudnn.benchmark = True
            torch.backends.cuda.matmul.allow_tf32 = True
            torch.backends.cudnn.allow_tf32 = True
            
            # Set optimal CUDA settings
            torch.backends.cudnn.deterministic = False
            torch.backends.cudnn.enabled = True
            
            # Enable cuBLAS optimizations
            os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
        
        # Optimize CPU inference
        if self.device == 'cpu':
            import multiprocessing
            num_threads = multiprocessing.cpu_count()
            torch.set_num_threads(num_threads)
            try:
                # Can only be set once, before any inter-op parallel work starts,
                # so guard against repeated backend construction.
                torch.set_num_interop_threads(num_threads)
            except RuntimeError:
                pass
            print(f"CPU threads set to {num_threads}")
    
    def optimize_model(
        self,
        model: nn.Module,
        example_input: Optional[torch.Tensor] = None,
        use_amp: bool = True,
        use_channels_last: bool = True
    ) -> nn.Module:
        """
        Optimize PyTorch model for inference.
        
        Parameters:
        ----------
        model : nn.Module
            PyTorch model to optimize
        example_input : Optional[torch.Tensor]
            Example input for optimization (required for some modes)
        use_amp : bool
            Use automatic mixed precision (AMP)
        use_channels_last : bool
            Use channels-last memory format
            
        Returns:
        -------
        nn.Module
            Optimized model
        """
        print(f"Optimizing model with mode: {self.optimize_mode}")
        
        self.model = model.eval().to(self.device)
        self.use_amp = use_amp
        
        # Disable gradients for all parameters (inference only)
        for param in self.model.parameters():
            param.requires_grad = False
        
        # Apply memory format optimization (default: channels_last for CUDA)
        # Note: Audio models use 3D tensors, so channels_last is applied only where beneficial
        if use_channels_last and self.device.startswith('cuda'):
            print("  Using channels-last optimization")
            # Only apply to model if it has 4D conv layers, otherwise skip silently
            try:
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    self.model = self.model.to(memory_format=torch.channels_last)
            except Exception:
                pass  # Silently skip for models that don't support channels_last
        
        # Set model to inference mode
        torch.set_grad_enabled(False)
        
        # Apply optimization based on mode
        if self.optimize_mode == 'compile':
            self.compiled_model = self._compile_model(self.model)
        elif self.optimize_mode == 'jit':
            if example_input is None:
                raise ValueError("example_input required for JIT optimization")
            self.compiled_model = self._jit_trace_model(self.model, example_input)
        elif self.optimize_mode == 'channels_last':
            self.compiled_model = self.model
        else:
            print("  Using default optimization")
            self.compiled_model = self.model
        
        # Note on operator fusion: torch.nn.utils.fusion.fuse_conv_bn_eval() fuses a
        # single (conv, bn) module pair rather than a whole model, so no blanket
        # fusion is attempted here. Models with a known layer layout can be fused
        # explicitly via torch.ao.quantization.fuse_modules(model, modules_to_fuse).
        
        print("Optimization complete")
        return self.compiled_model
    
    def _compile_model(self, model: nn.Module) -> nn.Module:
        """
        Compile model using torch.compile (PyTorch 2.0+), preferring max-autotune.
        
        Parameters:
        ----------
        model : nn.Module
            Model to compile
            
        Returns:
        -------
        nn.Module
            Compiled model
        """
        try:
            if hasattr(torch, 'compile'):
                print("  Compiling model with torch.compile")
                # Try max-autotune for best performance. torch.compile is lazy, so
                # most compilation failures surface at the first forward call (the
                # runtime fallback in __call__ handles those), not here.
                try:
                    compiled = torch.compile(model, mode='max-autotune', fullgraph=True)
                    print("  Using max-autotune mode")
                    return compiled
                except Exception:
                    # Fallback to reduce-overhead
                    compiled = torch.compile(model, mode='reduce-overhead')
                    print("  Using reduce-overhead mode")
                    return compiled
            else:
                print("  torch.compile not available (requires PyTorch 2.0+)")
                return model
        except Exception as e:
            print(f"  Compilation failed: {e}")
            return model
    
    def _jit_trace_model(self, model: nn.Module, example_input: torch.Tensor) -> nn.Module:
        """
        Trace model using TorchScript JIT.
        
        Parameters:
        ----------
        model : nn.Module
            Model to trace
        example_input : torch.Tensor
            Example input for tracing
            
        Returns:
        -------
        nn.Module
            Traced model
        """
        try:
            print("  → Tracing model with TorchScript JIT")
            with torch.no_grad():
                traced = torch.jit.trace(model, example_input)
            traced = torch.jit.optimize_for_inference(traced)
            return traced
        except Exception as e:
            print(f"  JIT tracing failed: {e}")
            return model
    
    def save_optimized_model(self, save_path: str):
        """
        Save optimized model to file.
        
        Parameters:
        ----------
        save_path : str
            Path to save the model
        """
        if self.compiled_model is None:
            raise RuntimeError("No model has been optimized yet")
        
        try:
            # Save based on optimization mode
            if self.optimize_mode == 'jit':
                torch.jit.save(self.compiled_model, save_path)
            else:
                torch.save(self.compiled_model.state_dict(), save_path)
            print(f"✓ Model saved to: {save_path}")
        except Exception as e:
            print(f"✗ Failed to save model: {e}")
    
    def load_optimized_model(self, load_path: str, model_template: nn.Module) -> nn.Module:
        """
        Load optimized model from file.
        
        Parameters:
        ----------
        load_path : str
            Path to the saved model
        model_template : nn.Module
            Model template for loading state dict
            
        Returns:
        -------
        nn.Module
            Loaded model
        """
        try:
            if self.optimize_mode == 'jit':
                self.compiled_model = torch.jit.load(load_path, map_location=self.device)
            else:
                model_template.load_state_dict(torch.load(load_path, map_location=self.device, weights_only=False))
                self.compiled_model = model_template.eval()
            
            print(f"✓ Model loaded from: {load_path}")
            return self.compiled_model
        except (pickle.UnpicklingError, RuntimeError, EOFError) as e:
            error_details = f"""
CHECKPOINT FILE CORRUPTED

Error: {str(e)}

The checkpoint file appears to be corrupted or was not downloaded correctly.
File: {load_path}

Common causes:
  - File is an HTML page (wrong download URL, e.g., HuggingFace /blob/ instead of /resolve/)
  - Incomplete or interrupted download
  - Network issues during download
  - File system corruption

Solution:
  1. Delete the corrupted checkpoint file:
     {load_path}
  2. Re-run the application - it will automatically re-download the model
  3. If the problem persists, check that your model URL uses /resolve/ not /blob/
     Example: https://huggingface.co/user/repo/resolve/main/model.ckpt
"""
            print(error_details)
            raise
        except Exception as e:
            print(f"✗ Failed to load model: {e}")
            raise
    
    def __call__(self, x: torch.Tensor) -> torch.Tensor:
        """
        Run inference with optimized model.
        
        Parameters:
        ----------
        x : torch.Tensor
            Input tensor
            
        Returns:
        -------
        torch.Tensor
            Model output
        """
        if self.compiled_model is None:
            raise RuntimeError("No model has been optimized yet")
        
        # Apply memory format if needed (only for 4D tensors - images)
        # Audio models typically use 3D tensors, so we silently skip channels_last for them
        if self.optimize_mode == 'channels_last' and x.dim() == 4:
            x = x.to(memory_format=torch.channels_last)
        
        # Run inference with AMP if enabled
        try:
            if self.use_amp and self.device.startswith('cuda'):
                with torch.cuda.amp.autocast():
                    with torch.no_grad():
                        return self.compiled_model(x)
            else:
                with torch.no_grad():
                    return self.compiled_model(x)
        except Exception as e:
            # Fallback to non-compiled model if torch.compile fails at runtime
            # This can happen with rotary embeddings that mutate class variables
            if self.optimize_mode == 'compile' and self.model is not None:
                print(f"  ⚠️ torch.compile runtime error: {type(e).__name__}")
                print(f"  🔄 Falling back to non-compiled model...")
                self.compiled_model = self.model
                self.optimize_mode = 'fallback'
                # Retry with non-compiled model
                if self.use_amp and self.device.startswith('cuda'):
                    with torch.cuda.amp.autocast():
                        with torch.no_grad():
                            return self.compiled_model(x)
                else:
                    with torch.no_grad():
                        return self.compiled_model(x)
            else:
                raise
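
# Usage sketch for PyTorchBackend (illustrative addition; `my_model` and the input
# shape below are hypothetical placeholders, not part of this module):
#
#     backend = PyTorchBackend(device='cuda:0', optimize_mode='channels_last')
#     backend.optimize_model(my_model, use_amp=True)
#     output = backend(torch.randn(1, 3, 224, 224, device='cuda:0'))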


class PyTorchOptimizer:
    """
    Helper class for various PyTorch optimization techniques.
    """
    
    @staticmethod
    def enable_cudnn_benchmark():
        """Enable cuDNN benchmark mode."""
        if torch.cuda.is_available():
            torch.backends.cudnn.benchmark = True
            torch.backends.cudnn.deterministic = False
            print("cuDNN benchmark enabled")
    
    @staticmethod
    def enable_cudnn_deterministic():
        """Enable cuDNN deterministic mode for reproducible results."""
        if torch.cuda.is_available():
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
            print("✓ cuDNN deterministic mode enabled")
    
    @staticmethod
    def enable_tf32():
        """Enable TF32 for Ampere GPUs (RTX 30xx+)."""
        if torch.cuda.is_available():
            torch.backends.cuda.matmul.allow_tf32 = True
            torch.backends.cudnn.allow_tf32 = True
            # Also enable for float32 matmul precision
            torch.set_float32_matmul_precision('high')  # 'high' allows TF32 matmuls; 'highest' keeps full FP32 precision (slower)
            print("TF32 enabled")
    
    @staticmethod
    def set_num_threads(num_threads: int):
        """Set number of threads for CPU inference."""
        torch.set_num_threads(num_threads)
        print(f"✓ Number of threads set to: {num_threads}")
    
    @staticmethod
    def optimize_for_inference(model: nn.Module) -> nn.Module:
        """
        Apply inference-only optimizations (eval mode, gradients disabled).
        
        Parameters:
        ----------
        model : nn.Module
            Model to optimize
            
        Returns:
        -------
        nn.Module
            Optimized model
        """
        model.eval()
        torch.set_grad_enabled(False)
        
        # Disable gradient computation for all parameters
        for param in model.parameters():
            param.requires_grad = False
        
        # Note: automatic operator fusion is not attempted here.
        # torch.ao.quantization.fuse_modules() needs an explicit modules_to_fuse
        # list, and torch.nn.utils.fusion.fuse_conv_bn_eval() fuses a single
        # (conv, bn) pair, so both require model-specific knowledge.
        
        return model
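
# Typical use of the static helpers above (illustrative; intended to be called once
# at startup, before models are built, since they set process-wide flags):
#
#     PyTorchOptimizer.enable_tf32()
#     PyTorchOptimizer.enable_cudnn_benchmark()
#     model = PyTorchOptimizer.optimize_for_inference(model)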


def benchmark_pytorch_optimizations(
    model: nn.Module,
    input_shape: Tuple[int, ...],
    device: str = 'cuda:0',
    num_iterations: int = 100,
    warmup_iterations: int = 10
) -> Dict[str, float]:
    """
    Benchmark different PyTorch optimization techniques.
    
    Parameters:
    ----------
    model : nn.Module
        Model to benchmark
    input_shape : Tuple[int, ...]
        Input tensor shape
    device : str
        Device to use
    num_iterations : int
        Number of benchmark iterations
    warmup_iterations : int
        Number of warmup iterations
        
    Returns:
    -------
    Dict[str, float]
        Average inference time per call in milliseconds for each mode (None if that mode failed)
    """
    results = {}
    dummy_input = torch.randn(*input_shape).to(device)
    
    optimization_modes = ['default', 'compile', 'channels_last']
    
    for mode in optimization_modes:
        print(f"\n{'='*60}")
        print(f"Benchmarking: {mode}")
        print('='*60)
        
        try:
            backend = PyTorchBackend(device=device, optimize_mode=mode)
            
            # Optimize model
            if mode == 'compile':
                optimized_model = backend.optimize_model(model, use_amp=True)
            else:
                optimized_model = backend.optimize_model(
                    model, 
                    example_input=dummy_input,
                    use_amp=True,
                    use_channels_last=(mode == 'channels_last')
                )
            
            # Warmup
            for _ in range(warmup_iterations):
                _ = backend(dummy_input)
            
            # Benchmark
            if device.startswith('cuda'):
                torch.cuda.synchronize()
            
            start = time.time()
            for _ in range(num_iterations):
                _ = backend(dummy_input)
            
            if device.startswith('cuda'):
                torch.cuda.synchronize()
            
            elapsed = (time.time() - start) / num_iterations
            results[mode] = elapsed * 1000  # Convert to ms
            
            print(f"  Average time: {results[mode]:.2f} ms")
            
        except Exception as e:
            print(f"  Failed: {e}")
            results[mode] = None
    
    return results
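
# Benchmark usage sketch (illustrative; `my_model` and the batch shape are
# hypothetical placeholders). The result maps each mode to its average latency in
# milliseconds, or None if that mode failed:
#
#     results = benchmark_pytorch_optimizations(my_model, (1, 3, 224, 224),
#                                               device='cuda:0', num_iterations=100)
#     best = min((m for m, t in results.items() if t is not None),
#                key=lambda m: results[m])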


def create_inference_session(
    model: nn.Module,
    device: str = 'cuda:0',
    optimize_mode: str = 'default',
    enable_amp: bool = True,
    enable_tf32: bool = True,
    enable_cudnn_benchmark: bool = True
) -> PyTorchBackend:
    """
    Create an optimized inference session.
    
    Parameters:
    ----------
    model : nn.Module
        Model to use for inference
    device : str
        Device to use
    optimize_mode : str
        Optimization mode
    enable_amp : bool
        Enable automatic mixed precision
    enable_tf32 : bool
        Enable TF32 (for Ampere GPUs)
    enable_cudnn_benchmark : bool
        Enable cuDNN benchmark
        
    Returns:
    -------
    PyTorchBackend
        Configured inference session
    """
    # Apply global optimizations
    optimizer = PyTorchOptimizer()
    
    if enable_cudnn_benchmark:
        optimizer.enable_cudnn_benchmark()
    
    if enable_tf32 and device.startswith('cuda'):
        optimizer.enable_tf32()
    
    # Create backend
    backend = PyTorchBackend(device=device, optimize_mode=optimize_mode)
    backend.optimize_model(model, use_amp=enable_amp)
    
    return backend
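
# Session creation sketch (illustrative; `my_model` and `batch` are hypothetical
# placeholders). The returned backend is callable, so it can replace the raw
# model's forward pass directly:
#
#     session = create_inference_session(my_model, device='cuda:0',
#                                        optimize_mode='compile', enable_amp=True)
#     logits = session(batch)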


def convert_model_to_onnx(
    model: nn.Module,
    input_shape: Tuple[int, ...],
    output_path: str,
    opset_version: int = 14
):
    """
    Convert PyTorch model to ONNX format.
    
    Parameters:
    ----------
    model : nn.Module
        Model to convert
    input_shape : Tuple[int, ...]
        Input tensor shape
    output_path : str
        Path to save ONNX model
    opset_version : int
        ONNX opset version
    """
    try:
        import onnx
        
        model.eval()
        dummy_input = torch.randn(*input_shape)
        
        print(f"Converting model to ONNX (opset {opset_version})...")
        torch.onnx.export(
            model,
            dummy_input,
            output_path,
            export_params=True,
            opset_version=opset_version,
            do_constant_folding=True,
            input_names=['input'],
            output_names=['output'],
            dynamic_axes={
                'input': {0: 'batch_size'},
                'output': {0: 'batch_size'}
            }
        )
        
        # Verify ONNX model
        onnx_model = onnx.load(output_path)
        onnx.checker.check_model(onnx_model)
        
        print(f"✓ ONNX model saved to: {output_path}")

    except ImportError:
        print("✗ ONNX not available. Install with: pip install onnx")
    except Exception as e:
        print(f"✗ ONNX conversion failed: {e}")

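# ONNX export sketch (illustrative; the path and input shape are placeholders).
# The exported graph declares a dynamic batch dimension, so any batch size can be
# fed at inference time:
#
#     convert_model_to_onnx(my_model, (1, 3, 224, 224), 'model.onnx', opset_version=14)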

def get_model_info(model: nn.Module) -> Dict[str, Any]:
    """
    Get information about a PyTorch model.
    
    Parameters:
    ----------
    model : nn.Module
        Model to analyze
        
    Returns:
    -------
    Dict[str, Any]
        Model information
    """
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    
    # Estimate model size
    param_size = sum(p.nelement() * p.element_size() for p in model.parameters())
    buffer_size = sum(b.nelement() * b.element_size() for b in model.buffers())
    size_mb = (param_size + buffer_size) / (1024 ** 2)
    
    return {
        'total_parameters': total_params,
        'trainable_parameters': trainable_params,
        'model_size_mb': size_mb,
        'device': next(model.parameters()).device,
        'dtype': next(model.parameters()).dtype
    }
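

if __name__ == '__main__':
    # Minimal CPU smoke test (illustrative addition, not part of the original
    # module's API): build a tiny toy model, wrap it in an inference session, and
    # print its parameter summary plus the output shape of one forward pass.
    toy_model = nn.Sequential(
        nn.Conv2d(3, 8, kernel_size=3, padding=1),
        nn.BatchNorm2d(8),
        nn.ReLU(),
        nn.AdaptiveAvgPool2d(1),
        nn.Flatten(),
        nn.Linear(8, 4),
    )
    session = create_inference_session(
        toy_model, device='cpu', optimize_mode='default', enable_amp=False
    )
    print(get_model_info(session.compiled_model))
    print(session(torch.randn(2, 3, 32, 32)).shape)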