#!/usr/bin/env python3
# Copyright (C) 2024 Louis Chua Bean Chong
#
# This file is part of OpenLLM.
#
# OpenLLM is dual-licensed:
# 1. For open source use: GNU General Public License v3.0
# 2. For commercial use: Commercial License (contact for details)
#
# See LICENSE and docs/LICENSES.md for full license information.

"""
OpenLLM Model Export Script

This script implements Step 6 of the training pipeline: Model Export & Deployment.
It exports trained OpenLLM models to various formats for production inference.

Supported Formats:
- PyTorch native format (for Python inference)
- Hugging Face format (for ecosystem compatibility)
- ONNX format (for optimized cross-platform inference)

Usage:
    # PyTorch format
    python core/src/export_model.py \
        --model_dir models/small-extended-4k \
        --format pytorch \
        --output_dir exports/pytorch/

    # Hugging Face format
    python core/src/export_model.py \
        --model_dir models/small-extended-4k \
        --format huggingface \
        --output_dir exports/huggingface/

    # ONNX format
    python core/src/export_model.py \
        --model_dir models/small-extended-4k \
        --format onnx \
        --output_dir exports/onnx/ \
        --optimize_for_inference
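
    # Programmatic usage (a minimal sketch; run with core/src/ on PYTHONPATH)
    from export_model import ModelExporter
    exporter = ModelExporter("models/small-extended-4k", "exports/")
    paths = exporter.export_all_formats(optimize_onnx=True)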

Author: Louis Chua Bean Chong
License: GPLv3
"""

import argparse
import json
import os
import shutil
import sys
from datetime import datetime
from pathlib import Path
from typing import Dict

import torch

# Add current directory to path for imports
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

from model import create_model


class ModelExporter:
    """
    Comprehensive model exporter for OpenLLM models.

    Handles export to multiple formats including PyTorch, Hugging Face,
    and ONNX for different deployment scenarios.
    """

    def __init__(self, model_dir: str, output_dir: str):
        """
        Initialize the model exporter.

        Args:
            model_dir: Directory containing trained model checkpoints
            output_dir: Base directory for exported models
        """
        self.model_dir = Path(model_dir)
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Load model and metadata
        self.model, self.config, self.training_info = self._load_model()
        self.tokenizer_path = self._find_tokenizer()

        print("🔧 ModelExporter initialized")
        print(f"  Model: {self.config.model_name}")
        print(f"  Parameters: {self.model.get_num_params():,}")
        print(f"  Output directory: {output_dir}")

    def _load_model(self):
        """Load model from checkpoint directory."""
        # Find best model checkpoint
        best_model_path = self.model_dir / "best_model.pt"
        if not best_model_path.exists():
            # Look for latest checkpoint
            checkpoints = list(self.model_dir.glob("checkpoint_step_*.pt"))
            if not checkpoints:
                raise FileNotFoundError(f"No model checkpoints found in {self.model_dir}")

            # Get latest checkpoint
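            # Checkpoint filenames follow the checkpoint_step_<N>.pt pattern, so
            # the numeric suffix of the stem gives the training step to sort by.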
            latest_checkpoint = max(checkpoints, key=lambda p: int(p.stem.split("_")[-1]))
            best_model_path = latest_checkpoint

        print(f"📂 Loading model from {best_model_path}")

        # Load checkpoint
        checkpoint = torch.load(best_model_path, map_location="cpu")

        # Determine model size from config
        config_dict = checkpoint.get("config", {})
        n_layer = config_dict.get("n_layer", 12)

        if n_layer <= 6:
            model_size = "small"
        elif n_layer <= 12:
            model_size = "medium"
        else:
            model_size = "large"

        # Create and load model
        model = create_model(model_size)
        model.load_state_dict(checkpoint["model_state_dict"])
        model.eval()  # Set to evaluation mode

        # Extract training info
        training_info = {
            "step": checkpoint.get("step", 0),
            "best_loss": checkpoint.get("best_loss", 0.0),
            "model_size": model_size,
        }

        return model, model.config, training_info

    def _find_tokenizer(self):
        """Find tokenizer path."""
        # Try multiple possible locations
        possible_paths = [
            self.model_dir.parent / "tokenizer" / "tokenizer.model",
            Path("data/tokenizer/tokenizer.model"),
            self.model_dir / "tokenizer.model",
        ]

        for path in possible_paths:
            if path.exists():
                return str(path)

        raise FileNotFoundError("Tokenizer not found in expected locations")

    def export_pytorch(self) -> str:
        """
        Export model in PyTorch native format.

        Returns:
            Path to exported model directory
        """
        output_path = self.output_dir / "pytorch"
        output_path.mkdir(parents=True, exist_ok=True)

        print("🔄 Exporting to PyTorch format...")

        # Save model state dict
        model_path = output_path / "model.pt"
        torch.save(
            {
                "model_state_dict": self.model.state_dict(),
                "config": self.config.__dict__,
                "training_info": self.training_info,
            },
            model_path,
        )

        # Save configuration
        config_path = output_path / "config.json"
        with open(config_path, "w") as f:
            json.dump(
                {
                    "model_config": self.config.__dict__,
                    "training_info": self.training_info,
                    "export_format": "pytorch",
                },
                f,
                indent=2,
            )

        # Copy tokenizer
        tokenizer_out = output_path / "tokenizer.model"
        shutil.copy2(self.tokenizer_path, tokenizer_out)

        # Create loading script
        self._create_pytorch_loader(output_path)

        print(f"✅ PyTorch export completed: {output_path}")
        return str(output_path)

    def export_huggingface(self) -> str:
        """
        Export model in Hugging Face compatible format.

        Returns:
            Path to exported model directory
        """
        output_path = self.output_dir / "huggingface"
        output_path.mkdir(parents=True, exist_ok=True)

        print("🔄 Exporting to Hugging Face format...")

        # Save model weights in HF format
        model_path = output_path / "pytorch_model.bin"
        torch.save(self.model.state_dict(), model_path)

        # Create HF-compatible config
        hf_config = {
            "architectures": ["GPTModel"],
            "model_type": "gpt",
            "vocab_size": self.config.vocab_size,
            "n_layer": self.config.n_layer,
            "n_head": self.config.n_head,
            "n_embd": self.config.n_embd,
            "block_size": self.config.block_size,
            "dropout": self.config.dropout,
            "bias": self.config.bias,
            "torch_dtype": "float32",
            "transformers_version": "4.0.0",
            "openllm_version": "0.1.0",
            "training_steps": self.training_info["step"],
            "model_size": self.training_info["model_size"],
        }

        config_path = output_path / "config.json"
        with open(config_path, "w") as f:
            json.dump(hf_config, f, indent=2)

        # Copy tokenizer with HF naming
        shutil.copy2(self.tokenizer_path, output_path / "tokenizer.model")

        # Create tokenizer config
        tokenizer_config = {
            "tokenizer_class": "SentencePieceTokenizer",
            "model_max_length": self.config.block_size,
            "vocab_size": self.config.vocab_size,
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
            "pad_token": "<pad>",
        }

        with open(output_path / "tokenizer_config.json", "w") as f:
            json.dump(tokenizer_config, f, indent=2)

        # Create generation config
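        # NOTE: the special-token ids below are assumed defaults; verify them
        # against the ids baked into tokenizer.model (SentencePiece training
        # typically assigns <unk>=0, <s>=1, </s>=2 and no pad id), otherwise
        # padding/EOS handling will be wrong at generation time.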
        generation_config = {
            "max_length": 512,
            "max_new_tokens": 256,
            "temperature": 0.7,
            "top_k": 40,
            "top_p": 0.9,
            "do_sample": True,
            "pad_token_id": 0,
            "eos_token_id": 1,
            "bos_token_id": 2,
        }

        with open(output_path / "generation_config.json", "w") as f:
            json.dump(generation_config, f, indent=2)

        # Create HF loading script
        self._create_hf_loader(output_path)

        print(f"✅ Hugging Face export completed: {output_path}")
        return str(output_path)

    def export_onnx(self, optimize_for_inference: bool = False) -> str:
        """
        Export model to ONNX format for optimized inference.

        Args:
            optimize_for_inference: Whether to apply ONNX optimizations

        Returns:
            Path to exported ONNX model
        """
        try:
            import onnx
            import onnxruntime
        except ImportError:
            raise ImportError("ONNX export requires: pip install onnx onnxruntime")

        output_path = self.output_dir / "onnx"
        output_path.mkdir(parents=True, exist_ok=True)

        print("🔄 Exporting to ONNX format...")

        # Prepare model for export
        self.model.eval()

        # Create dummy input for tracing
        batch_size = 1
        seq_len = 64  # Use shorter sequence for compatibility
        dummy_input = torch.randint(0, self.config.vocab_size, (batch_size, seq_len))

        # Export to ONNX
        onnx_path = output_path / "model.onnx"
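        # dynamic_axes marks the batch and sequence dimensions as variable, so
        # the exported graph accepts other shapes at inference time even though
        # tracing uses the fixed (1, 64) dummy input above.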

        torch.onnx.export(
            self.model,
            dummy_input,
            str(onnx_path),
            export_params=True,
            opset_version=11,
            do_constant_folding=True,
            input_names=["input_ids"],
            output_names=["logits"],
            dynamic_axes={
                "input_ids": {0: "batch_size", 1: "sequence_length"},
                "logits": {0: "batch_size", 1: "sequence_length"},
            },
        )

        # Verify ONNX model
        onnx_model = onnx.load(str(onnx_path))
        onnx.checker.check_model(onnx_model)

        # Apply optimizations if requested
        if optimize_for_inference:
            self._optimize_onnx_model(onnx_path)

        # Save metadata
        metadata = {
            "model_config": self.config.__dict__,
            "training_info": self.training_info,
            "export_format": "onnx",
            "input_shape": [batch_size, seq_len],
            "input_names": ["input_ids"],
            "output_names": ["logits"],
            "optimized": optimize_for_inference,
        }

        with open(output_path / "metadata.json", "w") as f:
            json.dump(metadata, f, indent=2)

        # Copy tokenizer
        shutil.copy2(self.tokenizer_path, output_path / "tokenizer.model")

        # Create ONNX inference script
        self._create_onnx_inference(output_path)

        print(f"✅ ONNX export completed: {onnx_path}")
        return str(onnx_path)

    def _optimize_onnx_model(self, onnx_path: Path):
        """Apply ONNX optimizations for inference."""
        try:
            import onnxruntime as ort

            print("🔧 Applying ONNX optimizations...")

            # onnxruntime applies its graph optimizations when a session is
            # created; optimized_model_filepath writes the optimized graph back
            # to disk so it can be reused offline.
            optimized_path = onnx_path.parent / "model_optimized.onnx"

            sess_options = ort.SessionOptions()
            sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
            sess_options.optimized_model_filepath = str(optimized_path)
            ort.InferenceSession(str(onnx_path), sess_options)

            # Replace original with optimized
            shutil.move(str(optimized_path), str(onnx_path))

            print("✅ ONNX optimizations applied")

        except ImportError:
            print("⚠️  ONNX optimization requires onnxruntime: pip install onnxruntime")
        except Exception as e:
            print(f"⚠️  ONNX optimization failed: {e}")

    def _create_pytorch_loader(self, output_path: Path):
        """Create PyTorch model loader script."""
        loader_script = '''#!/usr/bin/env python3
"""
PyTorch Model Loader for OpenLLM

Usage:
    from load_model import load_model, generate_text

    model, tokenizer, config = load_model(".")
    text = generate_text(model, tokenizer, "Hello world", max_length=50)
    print(text)
"""

import torch
import json
import sentencepiece as spm
from pathlib import Path

def load_model(model_dir="."):
    """Load OpenLLM model from PyTorch export."""
    model_dir = Path(model_dir)

    # Load config
    with open(model_dir / "config.json", 'r') as f:
        config_data = json.load(f)

    model_config = config_data['model_config']

    # Recreate model architecture (you'll need to have the model.py file)
    # This is a simplified loader - in practice you'd import your GPTModel class
    print(f"Model config: {model_config}")
    print("Note: You need to import and create the actual model class")

    # Load model state
    checkpoint = torch.load(model_dir / "model.pt", map_location='cpu')

    # Load tokenizer
    tokenizer = spm.SentencePieceProcessor()
    tokenizer.load(str(model_dir / "tokenizer.model"))

    return None, tokenizer, model_config  # Placeholder

def generate_text(model, tokenizer, prompt, max_length=100):
    """Generate text using the loaded model."""
    # Implement text generation
    return f"Generated text for: {prompt}"

if __name__ == "__main__":
    model, tokenizer, config = load_model()
    print(f"Model loaded with {config.get('vocab_size', 'unknown')} vocabulary size")
'''

        with open(output_path / "load_model.py", "w") as f:
            f.write(loader_script)

    def _create_hf_loader(self, output_path: Path):
        """Create Hugging Face model loader script."""
        loader_script = '''#!/usr/bin/env python3
"""
Hugging Face Compatible Loader for OpenLLM

Usage:
    # Using transformers library (if you implement custom model class)
    # from transformers import AutoModel, AutoTokenizer
    # model = AutoModel.from_pretrained(".")
    # tokenizer = AutoTokenizer.from_pretrained(".")

    # Manual loading
    from load_hf_model import load_model_manual
    model, tokenizer = load_model_manual(".")
"""

import torch
import json
import sentencepiece as spm
from pathlib import Path

def load_model_manual(model_dir="."):
    """Manually load model in HF format."""
    model_dir = Path(model_dir)

    # Load config
    with open(model_dir / "config.json", 'r') as f:
        config = json.load(f)

    # Load model weights
    state_dict = torch.load(model_dir / "pytorch_model.bin", map_location='cpu')

    # Load tokenizer
    tokenizer = spm.SentencePieceProcessor()
    tokenizer.load(str(model_dir / "tokenizer.model"))

    print(f"Loaded model: {config['model_type']} with {config['n_layer']} layers")
    print(f"Vocabulary size: {config['vocab_size']}")

    return state_dict, tokenizer

if __name__ == "__main__":
    state_dict, tokenizer = load_model_manual()
    print(f"Model weights loaded: {len(state_dict)} tensors")
    print(f"Tokenizer vocabulary: {tokenizer.vocab_size()}")
'''

        with open(output_path / "load_hf_model.py", "w") as f:
            f.write(loader_script)

    def _create_onnx_inference(self, output_path: Path):
        """Create ONNX inference script."""
        inference_script = '''#!/usr/bin/env python3
"""
ONNX Inference for OpenLLM

Usage:
    from onnx_inference import ONNXInference

    inference = ONNXInference(".")
    output = inference.generate("Hello world", max_length=50)
    print(output)
"""

import numpy as np
import json
import sentencepiece as spm
from pathlib import Path

try:
    import onnxruntime as ort
except ImportError:
    print("Install onnxruntime: pip install onnxruntime")
    ort = None

class ONNXInference:
    def __init__(self, model_dir="."):
        if ort is None:
            raise ImportError("onnxruntime not available")

        model_dir = Path(model_dir)

        # Load ONNX model
        self.session = ort.InferenceSession(str(model_dir / "model.onnx"))

        # Load metadata
        with open(model_dir / "metadata.json", 'r') as f:
            self.metadata = json.load(f)

        # Load tokenizer
        self.tokenizer = spm.SentencePieceProcessor()
        self.tokenizer.load(str(model_dir / "tokenizer.model"))

        print(f"ONNX model loaded: {self.metadata['model_config']['model_name']}")

    def predict(self, input_ids):
        """Run inference on input token IDs."""
        # Prepare input
        input_data = {"input_ids": input_ids.astype(np.int64)}

        # Run inference
        outputs = self.session.run(None, input_data)
        return outputs[0]  # logits

    def generate(self, prompt, max_length=50, temperature=0.7):
        """Generate text from prompt."""
        # Tokenize prompt
        tokens = self.tokenizer.encode(prompt)
        input_ids = np.array([tokens], dtype=np.int64)

        # Autoregressive decoding: temperature sampling, or greedy argmax when temperature == 0
        generated = tokens.copy()

        for _ in range(max_length):
            if len(generated) >= 512:  # Max sequence length
                break

            # Get current input (last 64 tokens to fit ONNX model)
            current_input = np.array([generated[-64:]], dtype=np.int64)

            # Predict next token
            logits = self.predict(current_input)
            next_token_logits = logits[0, -1, :]  # Last position

            # Apply temperature and sample
            if temperature > 0:
                next_token_logits = next_token_logits / temperature
                # Numerically stable softmax: shift by the max logit before exp
                exp_logits = np.exp(next_token_logits - np.max(next_token_logits))
                probs = exp_logits / np.sum(exp_logits)
                next_token = np.random.choice(len(probs), p=probs)
            else:
                next_token = np.argmax(next_token_logits)

            generated.append(int(next_token))

        # Decode generated text
        generated_text = self.tokenizer.decode(generated[len(tokens):])
        return generated_text

if __name__ == "__main__":
    inference = ONNXInference()
    result = inference.generate("The future of AI is", max_length=30)
    print(f"Generated: {result}")
'''

        with open(output_path / "onnx_inference.py", "w") as f:
            f.write(inference_script)

    def export_all_formats(self, optimize_onnx: bool = False) -> Dict[str, str]:
        """
        Export model to all supported formats.

        Args:
            optimize_onnx: Whether to optimize ONNX model

        Returns:
            Dictionary mapping format names to export paths
        """
        results = {}

        print("🚀 Exporting to all formats...")

        try:
            results["pytorch"] = self.export_pytorch()
        except Exception as e:
            print(f"❌ PyTorch export failed: {e}")

        try:
            results["huggingface"] = self.export_huggingface()
        except Exception as e:
            print(f"❌ Hugging Face export failed: {e}")

        try:
            results["onnx"] = self.export_onnx(optimize_onnx)
        except Exception as e:
            print(f"❌ ONNX export failed: {e}")

        # Create summary
        summary = {
            "export_timestamp": datetime.now().isoformat(),
            "model_info": {
                "name": self.config.model_name,
                "parameters": self.model.get_num_params(),
                "training_steps": self.training_info["step"],
                "best_loss": self.training_info["best_loss"],
            },
            "exports": results,
        }

        with open(self.output_dir / "export_summary.json", "w") as f:
            json.dump(summary, f, indent=2)

        print(f"✅ Export summary saved: {self.output_dir / 'export_summary.json'}")

        return results


def main():
    """Main export function."""
    parser = argparse.ArgumentParser(
        description="Export OpenLLM models to various formats",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Export to PyTorch format
  python core/src/export_model.py \\
    --model_dir models/small-extended-4k \\
    --format pytorch \\
    --output_dir exports/pytorch/

  # Export to Hugging Face format
  python core/src/export_model.py \\
    --model_dir models/small-extended-4k \\
    --format huggingface \\
    --output_dir exports/huggingface/

  # Export to ONNX with optimizations
  python core/src/export_model.py \\
    --model_dir models/small-extended-4k \\
    --format onnx \\
    --output_dir exports/onnx/ \\
    --optimize_for_inference

  # Export to all formats
  python core/src/export_model.py \\
    --model_dir models/small-extended-4k \\
    --format all \\
    --output_dir exports/
        """,
    )

    parser.add_argument(
        "--model_dir", required=True, help="Directory containing trained model checkpoints"
    )

    parser.add_argument(
        "--format",
        choices=["pytorch", "huggingface", "onnx", "all"],
        required=True,
        help="Export format",
    )

    parser.add_argument("--output_dir", required=True, help="Output directory for exported models")

    parser.add_argument(
        "--optimize_for_inference",
        action="store_true",
        help="Apply optimizations for inference (ONNX only)",
    )

    args = parser.parse_args()

    print("📦 OpenLLM Model Export")
    print("=" * 50)

    try:
        # Create exporter
        exporter = ModelExporter(args.model_dir, args.output_dir)

        # Export based on format
        if args.format == "pytorch":
            result = exporter.export_pytorch()
            print(f"\n✅ PyTorch export completed: {result}")

        elif args.format == "huggingface":
            result = exporter.export_huggingface()
            print(f"\n✅ Hugging Face export completed: {result}")

        elif args.format == "onnx":
            result = exporter.export_onnx(args.optimize_for_inference)
            print(f"\n✅ ONNX export completed: {result}")

        elif args.format == "all":
            results = exporter.export_all_formats(args.optimize_for_inference)
            print("\n✅ All formats exported:")
            for fmt, path in results.items():
                print(f"  {fmt}: {path}")

        print("\n🎉 Export completed successfully!")

    except Exception as e:
        print(f"\n❌ Export failed: {e}")
        import traceback

        traceback.print_exc()
        return False

    return True


if __name__ == "__main__":
    sys.exit(0 if main() else 1)