Update custom model files, README, and requirements
Browse files — asr_modeling.py: +4 −5
asr_modeling.py
CHANGED
|
@@ -200,17 +200,16 @@ class ASRModel(PreTrainedModel, GenerationMixin):
|
|
| 200 |
**cache_kwargs,
|
| 201 |
)
|
| 202 |
if adapter_config_file is not None:
|
| 203 |
-
# Load saved adapter weights
|
| 204 |
-
|
| 205 |
-
|
| 206 |
from peft import PeftModel
|
| 207 |
|
| 208 |
-
adapter_dir = str(Path(adapter_config_file).parent)
|
| 209 |
# language_model is bare (not PEFT-wrapped) since we skipped _setup_lora
|
| 210 |
model.language_model = PeftModel.from_pretrained(
|
| 211 |
model.language_model,
|
| 212 |
-
|
| 213 |
is_trainable=True,
|
|
|
|
| 214 |
)
|
| 215 |
else:
|
| 216 |
# No saved adapters - initialize fresh LoRA for training
|
|
|
|
| 200 |
**cache_kwargs,
|
| 201 |
)
|
| 202 |
if adapter_config_file is not None:
|
| 203 |
+
# Load saved adapter weights using the original repo_id/path
|
| 204 |
+
# PEFT handles Hub downloads and caching internally
|
|
|
|
| 205 |
from peft import PeftModel
|
| 206 |
|
|
|
|
| 207 |
# language_model is bare (not PEFT-wrapped) since we skipped _setup_lora
|
| 208 |
model.language_model = PeftModel.from_pretrained(
|
| 209 |
model.language_model,
|
| 210 |
+
pretrained_model_name_or_path, # Use original repo_id, not cache path
|
| 211 |
is_trainable=True,
|
| 212 |
+
**cache_kwargs,
|
| 213 |
)
|
| 214 |
else:
|
| 215 |
# No saved adapters - initialize fresh LoRA for training
|