"""
Unified model loading utility supporting ModelScope, HuggingFace and local path loading
"""
import os
import logging
import threading

import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer
from funasr_detach import AutoModel

# Global cache for downloaded models to avoid repeated downloads
# Key: (model_path, source, stringified sorted kwargs)
# Value: local_model_path
_model_download_cache = {}
_download_cache_lock = threading.Lock()
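# Note: this cache lives only for the current process; huggingface_hub and
# modelscope also keep their own on-disk caches, so a miss here does not
# necessarily trigger a fresh network download.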


class ModelSource:
    """Model source enumeration"""
    MODELSCOPE = "modelscope"
    HUGGINGFACE = "huggingface"
    LOCAL = "local"
    AUTO = "auto"  # Auto-detect


class UnifiedModelLoader:
    """Unified model loader"""

    def __init__(self):
        self.logger = logging.getLogger(__name__)

    def _cached_snapshot_download(self, model_path: str, source: str, **kwargs) -> str:
        """
        Cached version of snapshot_download to avoid repeated downloads

        Args:
            model_path: Model path or ID to download
            source: Model source ('modelscope' or 'huggingface')
            **kwargs: Additional arguments for snapshot_download

        Returns:
            Local path to downloaded model
        """
        cache_key = (model_path, source, str(sorted(kwargs.items())))

        # Check cache first
        with _download_cache_lock:
            if cache_key in _model_download_cache:
                cached_path = _model_download_cache[cache_key]
                self.logger.info(f"Using cached download for {model_path} from {source}: {cached_path}")
                return cached_path

        # Cache miss, need to download
        if source == ModelSource.MODELSCOPE:
            from modelscope.hub.snapshot_download import snapshot_download
            local_path = snapshot_download(model_path, **kwargs)
        elif source == ModelSource.HUGGINGFACE:
            from huggingface_hub import snapshot_download
            local_path = snapshot_download(model_path, **kwargs)
        else:
            raise ValueError(f"Unsupported source for cached download: {source}")

        # Cache the result
        with _download_cache_lock:
            _model_download_cache[cache_key] = local_path

        self.logger.info(f"Downloaded and cached {model_path} from {source}: {local_path}")
        return local_path

    def detect_model_source(self, model_path: str) -> str:
        """
        Automatically detect model source

        Args:
            model_path: Model path or ID

        Returns:
            Model source type
        """
        # Local path detection
        if os.path.exists(model_path) or os.path.isabs(model_path):
            return ModelSource.LOCAL

        # Hub ID detection (IDs usually take the form "username/model_name")
        if "/" in model_path and not model_path.startswith("http"):
            # Treat as ModelScope if the ID mentions it or matches a known pattern
            if "modelscope" in model_path.lower() or self._is_modelscope_format(model_path):
                return ModelSource.MODELSCOPE
            else:
                # Default to HuggingFace
                return ModelSource.HUGGINGFACE

        return ModelSource.LOCAL
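
    # Detection examples (illustrative; assumes the example paths do not
    # exist locally):
    #   "/abs/path/to/model"   -> ModelSource.LOCAL        (absolute path)
    #   "org-name/model-name"  -> ModelSource.HUGGINGFACE  (default for hub IDs)
    #   "bare-model-name"      -> ModelSource.LOCAL        (fallback)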

    def _is_modelscope_format(self, model_path: str) -> bool:
        """Check whether the ID matches a known ModelScope model ID format"""
        # Matching is based on known ModelScope model ID patterns, e.g.
        # iic/speech_paraformer-large_asr_nat-zh-cantonese-en-16k-vocab8501-online.
        # No patterns are registered yet, so this currently always returns False
        # and hub-style IDs fall through to the HuggingFace default.
        modelscope_patterns = []
        return any(pattern in model_path for pattern in modelscope_patterns)

    @spaces.GPU
    def load_transformers_model(
        self,
        model_path: str,
        source: str = ModelSource.AUTO,
        **kwargs
    ) -> tuple:
        """
        Load Transformers model (for StepAudioTTS)

        Args:
            model_path: Model path or ID
            source: Model source, auto means auto-detect
            **kwargs: Optional overrides; torch_dtype and device_map are honored

        Returns:
            (model, tokenizer, local_model_path) tuple
        """
        if source == ModelSource.AUTO:
            source = self.detect_model_source(model_path)

        self.logger.info(f"Loading Transformers model from {source}: {model_path}")

        try:
            if source == ModelSource.LOCAL:
                # Local loading
                model = AutoModelForCausalLM.from_pretrained(
                    model_path,
                    torch_dtype=kwargs.get("torch_dtype"),
                    device_map=kwargs.get("device_map", "auto"),
                    trust_remote_code=True,
                    local_files_only=True
                )
                tokenizer = AutoTokenizer.from_pretrained(
                    model_path,
                    trust_remote_code=True,
                    local_files_only=True
                )

            elif source == ModelSource.MODELSCOPE:
                # Load from ModelScope
                from modelscope import AutoModelForCausalLM as MSAutoModelForCausalLM
                from modelscope import AutoTokenizer as MSAutoTokenizer
                model_path = self._cached_snapshot_download(model_path, ModelSource.MODELSCOPE)

                model = MSAutoModelForCausalLM.from_pretrained(
                    model_path,
                    torch_dtype=kwargs.get("torch_dtype"),
                    device_map=kwargs.get("device_map", "auto"),
                    trust_remote_code=True,
                    local_files_only=True
                )
                tokenizer = MSAutoTokenizer.from_pretrained(
                    model_path,
                    trust_remote_code=True,
                    local_files_only=True
                )

            elif source == ModelSource.HUGGINGFACE:
                model_path = self._cached_snapshot_download(model_path, ModelSource.HUGGINGFACE)

                # Load from HuggingFace
                model = AutoModelForCausalLM.from_pretrained(
                    model_path,
                    torch_dtype=kwargs.get("torch_dtype"),
                    device_map=kwargs.get("device_map", "auto"),
                    trust_remote_code=True,
                    local_files_only=True
                )
                tokenizer = AutoTokenizer.from_pretrained(
                    model_path,
                    trust_remote_code=True,
                    local_files_only=True
                )

            else:
                raise ValueError(f"Unsupported model source: {source}")

            self.logger.info(f"Successfully loaded model from {source}")
            return model, tokenizer, model_path

        except Exception as e:
            self.logger.error(f"Failed to load model from {source}: {e}")
            raise
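
    # Usage sketch (illustrative; the model ID below is a placeholder, not a
    # real repository):
    #   model, tokenizer, local_path = model_loader.load_transformers_model(
    #       "example-org/example-model", torch_dtype="auto")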

    @spaces.GPU
    def load_funasr_model(
        self,
        repo_path: str,
        model_path: str,
        source: str = ModelSource.AUTO,
        **kwargs
    ) -> AutoModel:
        """
        Load FunASR model (for StepAudioTokenizer)

        Args:
            repo_path: Repository path forwarded to FunASR's AutoModel
            model_path: Model path or ID
            source: Model source, auto means auto-detect
            **kwargs: Other parameters forwarded to AutoModel

        Returns:
            FunASR AutoModel instance
        """
        if source == ModelSource.AUTO:
            source = self.detect_model_source(model_path)
            
        self.logger.info(f"Loading FunASR model from {source}: {model_path}")

        try:
            # Extract model_revision to avoid duplicate passing
            model_revision = kwargs.pop("model_revision", "main")

            # Map ModelSource to model_hub parameter
            if source == ModelSource.LOCAL:
                model_hub = "local"
            elif source == ModelSource.MODELSCOPE:
                model_hub = "ms"
            elif source == ModelSource.HUGGINGFACE:
                model_hub = "hf"
            else:
                raise ValueError(f"Unsupported model source: {source}")

            # Delegate download and loading to FunASR's AutoModel
            model = AutoModel(
                repo_path=repo_path,
                model=model_path,
                model_hub=model_hub,
                model_revision=model_revision,
                **kwargs
            )

            self.logger.info(f"Successfully loaded FunASR model from {source}")
            return model

        except Exception as e:
            self.logger.error(f"Failed to load FunASR model from {source}: {e}")
            raise
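
    # Usage sketch (illustrative placeholders):
    #   asr_model = model_loader.load_funasr_model(
    #       repo_path="/path/to/funasr/repo",
    #       model_path="example-org/example-asr-model")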

    def resolve_model_path(
        self,
        base_path: str,
        model_name: str,
        source: str = ModelSource.AUTO
    ) -> str:
        """
        Resolve model path

        Args:
            base_path: Base path
            model_name: Model name
            source: Model source

        Returns:
            Resolved model path
        """
        if source == ModelSource.AUTO:
            # First check local path
            local_path = os.path.join(base_path, model_name)
            if os.path.exists(local_path):
                return local_path

            # If local doesn't exist, return model name for online download
            return model_name

        elif source == ModelSource.LOCAL:
            return os.path.join(base_path, model_name)

        else:
            # For online sources, return the model name/ID as-is
            return model_name
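
    # Resolution examples (illustrative paths):
    #   resolve_model_path("/data/models", "foo")    -> "/data/models/foo" if it
    #                                                    exists locally, else "foo"
    #   resolve_model_path("/data/models", "foo",
    #                      source=ModelSource.LOCAL) -> "/data/models/foo"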


# Global instance
model_loader = UnifiedModelLoader()
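

if __name__ == "__main__":
    # Lightweight smoke test of the path-resolution helpers. The IDs and paths
    # below are illustrative placeholders, not real repositories; nothing is
    # downloaded and no model weights are loaded.
    logging.basicConfig(level=logging.INFO)
    print(model_loader.detect_model_source("example-org/example-model"))
    print(model_loader.resolve_model_path("/data/models", "example-model"))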