BMP committed on
Commit
e1ecf71
·
verified ·
1 Parent(s): 2e82ca2

Convert iic/speech_campplus_sv_zh_en_16k-common_advanced to MLX format

Browse files
Files changed (3) hide show
  1. __pycache__/model.cpython-312.pyc +0 -0
  2. config.json +1 -1
  3. model.py +57 -0
__pycache__/model.cpython-312.pyc CHANGED
Binary files a/__pycache__/model.cpython-312.pyc and b/__pycache__/model.cpython-312.pyc differ
 
config.json CHANGED
@@ -8,5 +8,5 @@
8
  "num_classes": null,
9
  "converted_from": "iic/speech_campplus_sv_zh_en_16k-common_advanced",
10
  "quantized": false,
11
- "conversion_date": "2026-01-16T12:06:47.419878"
12
  }
 
8
  "num_classes": null,
9
  "converted_from": "iic/speech_campplus_sv_zh_en_16k-common_advanced",
10
  "quantized": false,
11
+ "conversion_date": "2026-01-16T12:19:25.841196"
12
  }
model.py CHANGED
@@ -349,6 +349,63 @@ class CAMPPModelScopeV2(nn.Module):
349
 
350
  return embedding
351
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
352
 
353
  def load_model(weights_path: str, config_path: Optional[str] = None) -> CAMPPModelScopeV2:
354
  """Load model from weights and config"""
 
349
 
350
  return embedding
351
 
352
+ def load_weights(self, file_or_weights, strict: bool = True):
353
+ """
354
+ Override load_weights to handle quantized weights with dequantization
355
+
356
+ Args:
357
+ file_or_weights: Path to .npz file or list of (name, array) tuples
358
+ strict: If True, all parameters must match exactly
359
+ """
360
+ # Load weights from file if needed
361
+ if isinstance(file_or_weights, str):
362
+ loaded_weights = mx.load(file_or_weights)
363
+ else:
364
+ loaded_weights = dict(file_or_weights)
365
+
366
+ # Dequantize weights that have scales and biases
367
+ dequantized_weights = {}
368
+ quantized_names = set()
369
+
370
+ for name, array in loaded_weights.items():
371
+ # Check if this is a quantized weight by looking for scales/biases with metadata
372
+ # Format: name:qSCALES_GS64_B4 or name:qBIASES_GS64_B4
373
+ if ':qSCALES_GS' in name or ':qBIASES_GS' in name:
374
+ # Skip, will be processed when we see the main weight
375
+ continue
376
+
377
+ # Check if this weight has quantization metadata
378
+ has_quantization = any(k.startswith(f"{name}:qSCALES_GS") for k in loaded_weights.keys())
379
+
380
+ if has_quantization:
381
+ # Find the scales key to extract group_size and bits
382
+ scales_key = next(k for k in loaded_weights.keys() if k.startswith(f"{name}:qSCALES_GS"))
383
+ # Parse: name:qSCALES_GS64_B4 -> extract GS64 and B4
384
+ import re
385
+ match = re.search(r'GS(\d+)_B(\d+)', scales_key)
386
+ if match:
387
+ group_size = int(match.group(1))
388
+ bits = int(match.group(2))
389
+
390
+ # Get scales and biases
391
+ biases_key = f"{name}:qBIASES_GS{group_size}_B{bits}"
392
+ scales = loaded_weights[scales_key]
393
+ biases = loaded_weights[biases_key]
394
+
395
+ # Dequantize the weight
396
+ dequantized = mx.dequantize(array, scales, biases, group_size=group_size, bits=bits)
397
+ dequantized_weights[name] = dequantized
398
+ quantized_names.add(name)
399
+ else:
400
+ # Fallback: couldn't parse, keep original
401
+ dequantized_weights[name] = array
402
+ else:
403
+ # Regular weight (not quantized)
404
+ dequantized_weights[name] = array
405
+
406
+ # Use the parent class load_weights with dequantized weights
407
+ super().load_weights(list(dequantized_weights.items()), strict=strict)
408
+
409
 
410
  def load_model(weights_path: str, config_path: Optional[str] = None) -> CAMPPModelScopeV2:
411
  """Load model from weights and config"""