File size: 3,636 Bytes
208eb59
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
"""
export_to_lms.py — Export LoRA adapter back to LM Studio.

Workflow:
  1. Fuse LoRA adapter with base model via MLX
  2. Export to GGUF format
  3. Copy to LM Studio models directory
  4. Load via lms CLI
"""

import json
import logging
import shutil
import subprocess
import time
from pathlib import Path
from typing import Optional

log = logging.getLogger("export_to_lms")


def export_adapter_to_lms(config, version: Optional[int] = None) -> dict:
    """Export current LoRA adapter as GGUF to LM Studio.

    Fuses the LoRA adapter with its base model via mlx_lm, attempts a
    GGUF conversion (falling back to the merged-weights directory on
    failure), copies the result into LM Studio's models directory, and
    asks the lms CLI to load it. Steps 2-4 are best-effort: their
    failures are logged as warnings, not raised.

    Args:
        config: NeuralConfig instance (provides model/adapter paths,
            base dir, model key, and the lms CLI location).
        version: adapter version tag (auto-derived from the clock if None)

    Returns:
        dict with export details (export name, version, source model,
        adapter dir, gguf path, LM Studio path, timestamp).

    Raises:
        RuntimeError: if mlx-lm is not installed.
        Exception: re-raised if the fuse step (step 1) fails.
    """
    try:
        import mlx_lm
    except ImportError as e:
        # BUG FIX: chain the original ImportError so the real cause
        # (missing package vs. broken install) stays visible.
        raise RuntimeError("mlx-lm required for export") from e

    config.resolve_paths()

    if version is None:
        # Short, mostly-unique version tag derived from the clock.
        version = int(time.time()) % 100000

    model_dir = str(Path(config.model_path).parent)
    adapter_dir = config.adapter_dir
    export_name = f"{config.model_key}-tuned-v{version}"
    export_dir = Path(config.base_dir) / "exports" / export_name
    export_dir.mkdir(parents=True, exist_ok=True)

    # BUG FIX: original message ran source and destination together
    # ("...{model_dir}{export_dir}"); add an explicit arrow separator.
    log.info(f"Exporting adapter: {adapter_dir} + {model_dir} -> {export_dir}")

    # Step 1: Fuse adapter with base model.
    # mlx_lm.fuse writes merged weights to the output dir.
    try:
        mlx_lm.fuse(
            model=model_dir,
            adapter_path=adapter_dir,
            save_path=str(export_dir / "merged"),
        )
        log.info("LoRA adapter fused with base model")
    except Exception as e:
        # Fusing is mandatory -- without merged weights there is
        # nothing to export, so propagate the failure.
        log.error(f"Fuse failed: {e}")
        raise

    # Step 2: Convert to GGUF. Best-effort: on any failure fall back
    # to exporting the merged-weights directory as-is.
    gguf_path = export_dir / f"{export_name}.gguf"
    try:
        # Use mlx_lm convert if available
        result = subprocess.run(
            ["python3", "-m", "mlx_lm.convert",
             "--model", str(export_dir / "merged"),
             "--quantize", "--q-bits", "4",
             "-o", str(gguf_path)],
            capture_output=True, text=True, timeout=600)

        if result.returncode != 0:
            log.warning(f"GGUF convert failed: {result.stderr}")
            # Fallback: just copy the merged model
            gguf_path = export_dir / "merged"
    except Exception as e:
        log.warning(f"GGUF conversion error: {e}")
        gguf_path = export_dir / "merged"

    # Step 3: Copy to LM Studio models directory (best-effort).
    lms_dest = Path.home() / ".lmstudio" / "models" / "jarvis-tuned" / export_name
    try:
        lms_dest.mkdir(parents=True, exist_ok=True)
        if gguf_path.is_file():
            shutil.copy2(str(gguf_path), str(lms_dest))
        else:
            # Fallback case: gguf_path is the merged-weights directory.
            shutil.copytree(str(gguf_path), str(lms_dest), dirs_exist_ok=True)
        log.info(f"Copied to LM Studio: {lms_dest}")
    except Exception as e:
        log.warning(f"Copy to LM Studio failed: {e}")

    # Step 4: Load via lms CLI, if configured (best-effort).
    lms = config.lms_cli_path
    if lms:
        try:
            result = subprocess.run(
                [lms, "load", str(lms_dest)],
                capture_output=True, timeout=120)
            # BUG FIX: original ignored the exit status and always
            # logged success; only claim success when the load worked.
            if result.returncode == 0:
                log.info(f"Loaded {export_name} in LM Studio")
            else:
                log.warning(
                    f"LM Studio load exited {result.returncode}: "
                    f"{result.stderr}")
        except Exception as e:
            log.warning(f"LM Studio load failed: {e}")

    # Save export metadata alongside the export for traceability.
    meta = {
        "export_name": export_name,
        "version": version,
        "source_model": config.model_key,
        "adapter_dir": adapter_dir,
        "gguf_path": str(gguf_path),
        "lms_path": str(lms_dest),
        "timestamp": time.time(),
    }
    with open(export_dir / "export_meta.json", "w") as f:
        json.dump(meta, f, indent=2)

    return meta