richtext alexwengg commited on
Commit
0b8d4bb
·
0 Parent(s):

Duplicate from FluidInference/silero-vad-coreml

Browse files

Co-authored-by: Alex Weng <alexwengg@users.noreply.huggingface.co>

Files changed (35) hide show
  1. .gitattributes +51 -0
  2. README.md +94 -0
  3. config.json +1 -0
  4. graphs/yc_256ms_comparison_20250915_205721_2c04b81.png +3 -0
  5. graphs/yc_performance_20250915_205721_2c04b81.png +3 -0
  6. graphs/yc_standard_comparison_20250915_205721_2c04b81.png +3 -0
  7. silero-vad-unified-256ms-v6.0.0.mlmodelc/analytics/coremldata.bin +3 -0
  8. silero-vad-unified-256ms-v6.0.0.mlmodelc/coremldata.bin +3 -0
  9. silero-vad-unified-256ms-v6.0.0.mlmodelc/metadata.json +120 -0
  10. silero-vad-unified-256ms-v6.0.0.mlmodelc/model.mil +0 -0
  11. silero-vad-unified-256ms-v6.0.0.mlmodelc/weights/weight.bin +3 -0
  12. silero-vad-unified-256ms-v6.0.0.mlpackage/Data/com.apple.CoreML/model.mlmodel +3 -0
  13. silero-vad-unified-256ms-v6.0.0.mlpackage/Data/com.apple.CoreML/weights/weight.bin +3 -0
  14. silero-vad-unified-256ms-v6.0.0.mlpackage/Manifest.json +18 -0
  15. silero-vad-unified-v6.0.0.mlmodelc/analytics/coremldata.bin +3 -0
  16. silero-vad-unified-v6.0.0.mlmodelc/coremldata.bin +3 -0
  17. silero-vad-unified-v6.0.0.mlmodelc/metadata.json +117 -0
  18. silero-vad-unified-v6.0.0.mlmodelc/model.mil +143 -0
  19. silero-vad-unified-v6.0.0.mlmodelc/weights/weight.bin +3 -0
  20. silero-vad-unified-v6.0.0.mlpackage/Data/com.apple.CoreML/model.mlmodel +3 -0
  21. silero-vad-unified-v6.0.0.mlpackage/Data/com.apple.CoreML/weights/weight.bin +3 -0
  22. silero-vad-unified-v6.0.0.mlpackage/Manifest.json +18 -0
  23. silero_vad.mlmodelc/analytics/coremldata.bin +3 -0
  24. silero_vad.mlmodelc/coremldata.bin +3 -0
  25. silero_vad.mlmodelc/metadata.json +80 -0
  26. silero_vad.mlmodelc/model.mil +211 -0
  27. silero_vad.mlmodelc/weights/weight.bin +3 -0
  28. silero_vad_se_trained.mlpackage/Data/com.apple.CoreML/model.mlmodel +3 -0
  29. silero_vad_se_trained.mlpackage/Data/com.apple.CoreML/weights/weight.bin +3 -0
  30. silero_vad_se_trained.mlpackage/Manifest.json +18 -0
  31. silero_vad_se_trained_4bit.mlmodelc/analytics/coremldata.bin +3 -0
  32. silero_vad_se_trained_4bit.mlmodelc/coremldata.bin +3 -0
  33. silero_vad_se_trained_4bit.mlmodelc/metadata.json +81 -0
  34. silero_vad_se_trained_4bit.mlmodelc/model.mil +211 -0
  35. silero_vad_se_trained_4bit.mlmodelc/weights/weight.bin +3 -0
.gitattributes ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ test/ambient1.mp3 filter=lfs diff=lfs merge=lfs -text
37
+ test/ambient2.mp3 filter=lfs diff=lfs merge=lfs -text
38
+ test/ambient3.mp3 filter=lfs diff=lfs merge=lfs -text
39
+ test/ambient4.mp3 filter=lfs diff=lfs merge=lfs -text
40
+ test/ambient5.mp3 filter=lfs diff=lfs merge=lfs -text
41
+ test/human1.mp3 filter=lfs diff=lfs merge=lfs -text
42
+ test/human3.mp3 filter=lfs diff=lfs merge=lfs -text
43
+ test/human5.mp3 filter=lfs diff=lfs merge=lfs -text
44
+ silero_encoder.mlmodelc/model.espresso.weights filter=lfs diff=lfs merge=lfs -text
45
+ silero_rnn_decoder.mlmodelc/model.espresso.weights filter=lfs diff=lfs merge=lfs -text
46
+ silero_stft.mlmodelc/model.espresso.weights filter=lfs diff=lfs merge=lfs -text
47
+ comparison.png filter=lfs diff=lfs merge=lfs -text
48
+ speed.png filter=lfs diff=lfs merge=lfs -text
49
+ graphs/yc_256ms_comparison_20250915_205721_2c04b81.png filter=lfs diff=lfs merge=lfs -text
50
+ graphs/yc_performance_20250915_205721_2c04b81.png filter=lfs diff=lfs merge=lfs -text
51
+ graphs/yc_standard_comparison_20250915_205721_2c04b81.png filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ tags:
4
+ - audio
5
+ - voice-activity-detection
6
+ - coreml
7
+ - silero
8
+ - speech
9
+ - ios
10
+ - macos
11
+ - swift
12
+ library_name: coreml
13
+ pipeline_tag: voice-activity-detection
14
+ datasets:
15
+ - alexwengg/musan_mini50
16
+ - alexwengg/musan_mini100
17
+ metrics:
18
+ - accuracy
19
+ - f1
20
+ language:
21
+ - en
22
+ base_model:
23
+ - onnx-community/silero-vad
24
+ ---
25
+
26
+
27
+ # **<span style="color:#5DAF8D">🧃 CoreML Silero VAD </span>**
28
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Chat-7289da.svg)](https://discord.gg/WNsvaCtmDe)
29
+ [![GitHub Repo stars](https://img.shields.io/github/stars/FluidInference/FluidAudio?style=flat&logo=github)](https://github.com/FluidInference/FluidAudio)
30
+
31
+ A CoreML implementation of the Silero Voice Activity
32
+ Detection (VAD) model, optimized for Apple platforms
33
+ (iOS/macOS). This repository contains pre-converted
34
+ CoreML models ready for use in Swift applications.
35
+
36
+ See FluidAudio Repo link at the top for more information
37
+
38
+ ## Model Description
39
+
40
+ **Developed by:** Silero Team (original), converted by
41
+ FluidAudio
42
+
43
+ **Model type:** Voice Activity Detection
44
+
45
+ **License:** MIT
46
+
47
+ **Parent Model:**
48
+ [silero-vad](https://github.com/snakers4/silero-vad)
49
+
50
+
51
+ This is how the model performs against the silero-vad v6.0.0 baseline PyTorch JIT version
52
+
53
+ ![graphs/yc_standard_comparison_20250915_205721_2c04b81.png](graphs/yc_standard_comparison_20250915_205721_2c04b81.png)
54
+ ![graphs/yc_256ms_comparison_20250915_205721_2c04b81.png](graphs/yc_256ms_comparison_20250915_205721_2c04b81.png)
55
+
56
+ Note that we tested the quantized versions; since the model is already tiny, there's no performance improvement at all.
57
+
58
+
59
+ This is how the different models compare in terms of speed: the 256ms variant takes in 8 chunks of 32ms and processes them in batches, so it's much faster
60
+ ![graphs/yc_performance_20250915_205721_2c04b81.png](graphs/yc_performance_20250915_205721_2c04b81.png)
61
+
62
+
63
+ Conversion code is available here: [FluidInference/mobius](https://github.com/FluidInference/mobius)
64
+
65
+ ## Intended Use
66
+
67
+ ### Primary Use Cases
68
+ - Real-time voice activity detection in iOS/macOS
69
+ applications
70
+ - Speech preprocessing for ASR systems
71
+ - Audio segmentation and filtering
72
+
73
+ ## How to Use
74
+
75
+ Citation
76
+
77
+ @misc{silero-vad-coreml,
78
+ title={CoreML Silero VAD},
79
+ author={FluidAudio Team},
80
+ year={2024},
81
+
82
+ url={https://huggingface.co/alexwengg/coreml-silero-vad}
83
+ }
84
+
85
+ @misc{silero-vad,
86
+ title={Silero VAD},
87
+ author={Silero Team},
88
+ year={2021},
89
+ url={https://github.com/snakers4/silero-vad}
90
+ }
91
+
92
+
93
+ - GitHub: https://github.com/FluidAudio/FluidAudioSwift
94
+
config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {}
graphs/yc_256ms_comparison_20250915_205721_2c04b81.png ADDED

Git LFS Details

  • SHA256: a653353ecb575cbbc187d5ca4f3f3c45c36a362d8ba5c360811dfda114e48ce6
  • Pointer size: 131 Bytes
  • Size of remote file: 582 kB
graphs/yc_performance_20250915_205721_2c04b81.png ADDED

Git LFS Details

  • SHA256: 23cbe5be12ac6a507691f771fd3fe977a0559f5efd8f63c32453fea82737a11f
  • Pointer size: 131 Bytes
  • Size of remote file: 252 kB
graphs/yc_standard_comparison_20250915_205721_2c04b81.png ADDED

Git LFS Details

  • SHA256: 97b3eb4ec97a2af8f5233686f847b2eb532a4443b26d1a34a9122789359b3497
  • Pointer size: 131 Bytes
  • Size of remote file: 436 kB
silero-vad-unified-256ms-v6.0.0.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:30945d54e32c3f15ec35dc6ee32128a27a6cdc03b0a12ffab04434069c49dfb5
3
+ size 243
silero-vad-unified-256ms-v6.0.0.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0c3063bd09ba71c26ede0308d7c33591d0770e971a3fcc603ccad7ba1e8fb88d
3
+ size 625
silero-vad-unified-256ms-v6.0.0.mlmodelc/metadata.json ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "shortDescription" : "Silero VAD Unified Model 256ms (STFT + Encoder + Decoder) with noisy-OR aggregation",
4
+ "metadataOutputVersion" : "3.0",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float32",
10
+ "formattedType" : "MultiArray (Float32 1 × 1 × 1)",
11
+ "shortDescription" : "",
12
+ "shape" : "[1, 1, 1]",
13
+ "name" : "vad_output",
14
+ "type" : "MultiArray"
15
+ },
16
+ {
17
+ "hasShapeFlexibility" : "0",
18
+ "isOptional" : "0",
19
+ "dataType" : "Float32",
20
+ "formattedType" : "MultiArray (Float32 1 × 128)",
21
+ "shortDescription" : "",
22
+ "shape" : "[1, 128]",
23
+ "name" : "new_hidden_state",
24
+ "type" : "MultiArray"
25
+ },
26
+ {
27
+ "hasShapeFlexibility" : "0",
28
+ "isOptional" : "0",
29
+ "dataType" : "Float32",
30
+ "formattedType" : "MultiArray (Float32 1 × 128)",
31
+ "shortDescription" : "",
32
+ "shape" : "[1, 128]",
33
+ "name" : "new_cell_state",
34
+ "type" : "MultiArray"
35
+ }
36
+ ],
37
+ "version" : "6.0.0",
38
+ "modelParameters" : [
39
+
40
+ ],
41
+ "author" : "Fluid Inference + Silero Team",
42
+ "specificationVersion" : 6,
43
+ "storagePrecision" : "Mixed (Float16, Float32)",
44
+ "mlProgramOperationTypeHistogram" : {
45
+ "Concat" : 9,
46
+ "Lstm" : 8,
47
+ "SliceByIndex" : 41,
48
+ "Clip" : 32,
49
+ "Pow" : 16,
50
+ "Transpose" : 16,
51
+ "Sub" : 2,
52
+ "Relu" : 40,
53
+ "Squeeze" : 18,
54
+ "Cast" : 54,
55
+ "Sigmoid" : 8,
56
+ "Add" : 16,
57
+ "ExpandDims" : 26,
58
+ "Sqrt" : 8,
59
+ "Mul" : 7,
60
+ "Conv" : 48,
61
+ "Pad" : 8
62
+ },
63
+ "computePrecision" : "Mixed (Float16, Float32, Int32)",
64
+ "stateSchema" : [
65
+
66
+ ],
67
+ "isUpdatable" : "0",
68
+ "availability" : {
69
+ "macOS" : "12.0",
70
+ "tvOS" : "15.0",
71
+ "visionOS" : "1.0",
72
+ "watchOS" : "8.0",
73
+ "iOS" : "15.0",
74
+ "macCatalyst" : "15.0"
75
+ },
76
+ "modelType" : {
77
+ "name" : "MLModelType_mlProgram"
78
+ },
79
+ "inputSchema" : [
80
+ {
81
+ "hasShapeFlexibility" : "0",
82
+ "isOptional" : "0",
83
+ "dataType" : "Float32",
84
+ "formattedType" : "MultiArray (Float32 1 × 4160)",
85
+ "shortDescription" : "",
86
+ "shape" : "[1, 4160]",
87
+ "name" : "audio_input",
88
+ "type" : "MultiArray"
89
+ },
90
+ {
91
+ "hasShapeFlexibility" : "0",
92
+ "isOptional" : "0",
93
+ "dataType" : "Float32",
94
+ "formattedType" : "MultiArray (Float32 1 × 128)",
95
+ "shortDescription" : "",
96
+ "shape" : "[1, 128]",
97
+ "name" : "hidden_state",
98
+ "type" : "MultiArray"
99
+ },
100
+ {
101
+ "hasShapeFlexibility" : "0",
102
+ "isOptional" : "0",
103
+ "dataType" : "Float32",
104
+ "formattedType" : "MultiArray (Float32 1 × 128)",
105
+ "shortDescription" : "",
106
+ "shape" : "[1, 128]",
107
+ "name" : "cell_state",
108
+ "type" : "MultiArray"
109
+ }
110
+ ],
111
+ "userDefinedMetadata" : {
112
+ "com.github.apple.coremltools.conversion_date" : "2025-09-15",
113
+ "com.github.apple.coremltools.source" : "torch==2.7.0",
114
+ "com.github.apple.coremltools.version" : "9.0b1",
115
+ "com.github.apple.coremltools.source_dialect" : "TorchScript"
116
+ },
117
+ "generatedClassName" : "silero_vad_unified_256ms_v6_0_0",
118
+ "method" : "predict"
119
+ }
120
+ ]
silero-vad-unified-256ms-v6.0.0.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
silero-vad-unified-256ms-v6.0.0.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:853cf34740d3f5061f977ebe2976f7c921b064261c9c4753b3a1196f2dba42b4
3
+ size 882304
silero-vad-unified-256ms-v6.0.0.mlpackage/Data/com.apple.CoreML/model.mlmodel ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a996535cd1f1ac5f92bab1a66edb56bf054ef48c8742338b07ec5b892e2613c2
3
+ size 157925
silero-vad-unified-256ms-v6.0.0.mlpackage/Data/com.apple.CoreML/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:853cf34740d3f5061f977ebe2976f7c921b064261c9c4753b3a1196f2dba42b4
3
+ size 882304
silero-vad-unified-256ms-v6.0.0.mlpackage/Manifest.json ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "fileFormatVersion": "1.0.0",
3
+ "itemInfoEntries": {
4
+ "413A9C09-1AD3-4AA9-B5E2-5C5BA0FB1FCD": {
5
+ "author": "com.apple.CoreML",
6
+ "description": "CoreML Model Specification",
7
+ "name": "model.mlmodel",
8
+ "path": "com.apple.CoreML/model.mlmodel"
9
+ },
10
+ "DFAFD1E0-B2F3-4E3C-80EC-C2F0FC9E061D": {
11
+ "author": "com.apple.CoreML",
12
+ "description": "CoreML Model Weights",
13
+ "name": "weights",
14
+ "path": "com.apple.CoreML/weights"
15
+ }
16
+ },
17
+ "rootModelIdentifier": "413A9C09-1AD3-4AA9-B5E2-5C5BA0FB1FCD"
18
+ }
silero-vad-unified-v6.0.0.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2141be60ea0adf7acb1232fbcfaffb2be308ae02e6672d3762aedf36611ea9fd
3
+ size 243
silero-vad-unified-v6.0.0.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f460dcdf796b19c04bc38ab6e69601831f634e5e74d499487f0c8fe17ca12f0f
3
+ size 593
silero-vad-unified-v6.0.0.mlmodelc/metadata.json ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "shortDescription" : "Silero VAD Unified Model (STFT + Encoder + Decoder)",
4
+ "metadataOutputVersion" : "3.0",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float32",
10
+ "formattedType" : "MultiArray (Float32 1 × 1 × 1)",
11
+ "shortDescription" : "",
12
+ "shape" : "[1, 1, 1]",
13
+ "name" : "vad_output",
14
+ "type" : "MultiArray"
15
+ },
16
+ {
17
+ "hasShapeFlexibility" : "0",
18
+ "isOptional" : "0",
19
+ "dataType" : "Float32",
20
+ "formattedType" : "MultiArray (Float32 1 × 128)",
21
+ "shortDescription" : "",
22
+ "shape" : "[1, 128]",
23
+ "name" : "new_hidden_state",
24
+ "type" : "MultiArray"
25
+ },
26
+ {
27
+ "hasShapeFlexibility" : "0",
28
+ "isOptional" : "0",
29
+ "dataType" : "Float32",
30
+ "formattedType" : "MultiArray (Float32 1 × 128)",
31
+ "shortDescription" : "",
32
+ "shape" : "[1, 128]",
33
+ "name" : "new_cell_state",
34
+ "type" : "MultiArray"
35
+ }
36
+ ],
37
+ "version" : "6.0.0",
38
+ "modelParameters" : [
39
+
40
+ ],
41
+ "author" : "Fluid Inference + Silero Team",
42
+ "specificationVersion" : 6,
43
+ "storagePrecision" : "Mixed (Float16, Float32)",
44
+ "mlProgramOperationTypeHistogram" : {
45
+ "Lstm" : 1,
46
+ "SliceByIndex" : 2,
47
+ "Clip" : 4,
48
+ "Transpose" : 2,
49
+ "Pow" : 2,
50
+ "Relu" : 5,
51
+ "Squeeze" : 4,
52
+ "Cast" : 12,
53
+ "Sigmoid" : 1,
54
+ "Add" : 2,
55
+ "ExpandDims" : 5,
56
+ "Sqrt" : 1,
57
+ "Conv" : 6,
58
+ "Pad" : 1
59
+ },
60
+ "computePrecision" : "Mixed (Float16, Float32, Int32)",
61
+ "stateSchema" : [
62
+
63
+ ],
64
+ "isUpdatable" : "0",
65
+ "availability" : {
66
+ "macOS" : "12.0",
67
+ "tvOS" : "15.0",
68
+ "visionOS" : "1.0",
69
+ "watchOS" : "8.0",
70
+ "iOS" : "15.0",
71
+ "macCatalyst" : "15.0"
72
+ },
73
+ "modelType" : {
74
+ "name" : "MLModelType_mlProgram"
75
+ },
76
+ "inputSchema" : [
77
+ {
78
+ "hasShapeFlexibility" : "0",
79
+ "isOptional" : "0",
80
+ "dataType" : "Float32",
81
+ "formattedType" : "MultiArray (Float32 1 × 576)",
82
+ "shortDescription" : "",
83
+ "shape" : "[1, 576]",
84
+ "name" : "audio_input",
85
+ "type" : "MultiArray"
86
+ },
87
+ {
88
+ "hasShapeFlexibility" : "0",
89
+ "isOptional" : "0",
90
+ "dataType" : "Float32",
91
+ "formattedType" : "MultiArray (Float32 1 × 128)",
92
+ "shortDescription" : "",
93
+ "shape" : "[1, 128]",
94
+ "name" : "hidden_state",
95
+ "type" : "MultiArray"
96
+ },
97
+ {
98
+ "hasShapeFlexibility" : "0",
99
+ "isOptional" : "0",
100
+ "dataType" : "Float32",
101
+ "formattedType" : "MultiArray (Float32 1 × 128)",
102
+ "shortDescription" : "",
103
+ "shape" : "[1, 128]",
104
+ "name" : "cell_state",
105
+ "type" : "MultiArray"
106
+ }
107
+ ],
108
+ "userDefinedMetadata" : {
109
+ "com.github.apple.coremltools.conversion_date" : "2025-09-15",
110
+ "com.github.apple.coremltools.source" : "torch==2.7.0",
111
+ "com.github.apple.coremltools.version" : "9.0b1",
112
+ "com.github.apple.coremltools.source_dialect" : "TorchScript"
113
+ },
114
+ "generatedClassName" : "silero_vad_unified_v6_0_0",
115
+ "method" : "predict"
116
+ }
117
+ ]
silero-vad-unified-v6.0.0.mlmodelc/model.mil ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ program(1.0)
2
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3500.14.1"}, {"coremlc-version", "3500.32.1"}, {"coremltools-component-torch", "2.7.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "9.0b1"}})]
3
+ {
4
+ func main<ios15>(tensor<fp32, [1, 576]> audio_input, tensor<fp32, [1, 128]> cell_state, tensor<fp32, [1, 128]> hidden_state) {
5
+ tensor<int32, [4]> x_1_pad_0 = const()[name = tensor<string, []>("x_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 64])];
6
+ tensor<string, []> x_1_mode_0 = const()[name = tensor<string, []>("x_1_mode_0"), val = tensor<string, []>("reflect")];
7
+ tensor<string, []> audio_input_to_fp16_dtype_0 = const()[name = tensor<string, []>("audio_input_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
8
+ tensor<fp16, []> const_0_to_fp16 = const()[name = tensor<string, []>("const_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
9
+ tensor<fp16, [1, 576]> audio_input_to_fp16 = cast(dtype = audio_input_to_fp16_dtype_0, x = audio_input)[name = tensor<string, []>("cast_11")];
10
+ tensor<fp16, [1, 640]> x_1_cast_fp16 = pad(constant_val = const_0_to_fp16, mode = x_1_mode_0, pad = x_1_pad_0, x = audio_input_to_fp16)[name = tensor<string, []>("x_1_cast_fp16")];
11
+ tensor<int32, [1]> x_3_axes_0 = const()[name = tensor<string, []>("x_3_axes_0"), val = tensor<int32, [1]>([1])];
12
+ tensor<fp16, [1, 1, 640]> x_3_cast_fp16 = expand_dims(axes = x_3_axes_0, x = x_1_cast_fp16)[name = tensor<string, []>("x_3_cast_fp16")];
13
+ tensor<string, []> stft_out_pad_type_0 = const()[name = tensor<string, []>("stft_out_pad_type_0"), val = tensor<string, []>("valid")];
14
+ tensor<int32, [1]> stft_out_strides_0 = const()[name = tensor<string, []>("stft_out_strides_0"), val = tensor<int32, [1]>([128])];
15
+ tensor<int32, [2]> stft_out_pad_0 = const()[name = tensor<string, []>("stft_out_pad_0"), val = tensor<int32, [2]>([0, 0])];
16
+ tensor<int32, [1]> stft_out_dilations_0 = const()[name = tensor<string, []>("stft_out_dilations_0"), val = tensor<int32, [1]>([1])];
17
+ tensor<int32, []> stft_out_groups_0 = const()[name = tensor<string, []>("stft_out_groups_0"), val = tensor<int32, []>(1)];
18
+ tensor<fp16, [258, 1, 256]> stft_forward_basis_to_fp16 = const()[name = tensor<string, []>("stft_forward_basis_to_fp16"), val = tensor<fp16, [258, 1, 256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
19
+ tensor<fp16, [1, 258, 4]> stft_out_cast_fp16 = conv(dilations = stft_out_dilations_0, groups = stft_out_groups_0, pad = stft_out_pad_0, pad_type = stft_out_pad_type_0, strides = stft_out_strides_0, weight = stft_forward_basis_to_fp16, x = x_3_cast_fp16)[name = tensor<string, []>("stft_out_cast_fp16")];
20
+ tensor<int32, [3]> var_28_begin_0 = const()[name = tensor<string, []>("op_28_begin_0"), val = tensor<int32, [3]>([0, 0, 0])];
21
+ tensor<int32, [3]> var_28_end_0 = const()[name = tensor<string, []>("op_28_end_0"), val = tensor<int32, [3]>([1, 129, 4])];
22
+ tensor<bool, [3]> var_28_end_mask_0 = const()[name = tensor<string, []>("op_28_end_mask_0"), val = tensor<bool, [3]>([true, false, true])];
23
+ tensor<fp16, [1, 129, 4]> var_28_cast_fp16 = slice_by_index(begin = var_28_begin_0, end = var_28_end_0, end_mask = var_28_end_mask_0, x = stft_out_cast_fp16)[name = tensor<string, []>("op_28_cast_fp16")];
24
+ tensor<int32, [3]> var_31_begin_0 = const()[name = tensor<string, []>("op_31_begin_0"), val = tensor<int32, [3]>([0, 129, 0])];
25
+ tensor<int32, [3]> var_31_end_0 = const()[name = tensor<string, []>("op_31_end_0"), val = tensor<int32, [3]>([1, 258, 4])];
26
+ tensor<bool, [3]> var_31_end_mask_0 = const()[name = tensor<string, []>("op_31_end_mask_0"), val = tensor<bool, [3]>([true, true, true])];
27
+ tensor<fp16, [1, 129, 4]> var_31_cast_fp16 = slice_by_index(begin = var_31_begin_0, end = var_31_end_0, end_mask = var_31_end_mask_0, x = stft_out_cast_fp16)[name = tensor<string, []>("op_31_cast_fp16")];
28
+ tensor<fp16, []> var_7_promoted_to_fp16 = const()[name = tensor<string, []>("op_7_promoted_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
29
+ tensor<fp16, [1, 129, 4]> var_33_cast_fp16 = pow(x = var_28_cast_fp16, y = var_7_promoted_to_fp16)[name = tensor<string, []>("op_33_cast_fp16")];
30
+ tensor<fp16, []> var_7_promoted_1_to_fp16 = const()[name = tensor<string, []>("op_7_promoted_1_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
31
+ tensor<fp16, [1, 129, 4]> var_34_cast_fp16 = pow(x = var_31_cast_fp16, y = var_7_promoted_1_to_fp16)[name = tensor<string, []>("op_34_cast_fp16")];
32
+ tensor<fp16, [1, 129, 4]> var_35_cast_fp16 = add(x = var_33_cast_fp16, y = var_34_cast_fp16)[name = tensor<string, []>("op_35_cast_fp16")];
33
+ tensor<fp16, []> var_36_to_fp16 = const()[name = tensor<string, []>("op_36_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
34
+ tensor<fp16, [1, 129, 4]> var_37_cast_fp16 = add(x = var_35_cast_fp16, y = var_36_to_fp16)[name = tensor<string, []>("op_37_cast_fp16")];
35
+ tensor<fp16, [1, 129, 4]> input_1_cast_fp16 = sqrt(x = var_37_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")];
36
+ tensor<string, []> input_3_pad_type_0 = const()[name = tensor<string, []>("input_3_pad_type_0"), val = tensor<string, []>("custom")];
37
+ tensor<int32, [2]> input_3_pad_0 = const()[name = tensor<string, []>("input_3_pad_0"), val = tensor<int32, [2]>([1, 1])];
38
+ tensor<int32, [1]> input_3_strides_0 = const()[name = tensor<string, []>("input_3_strides_0"), val = tensor<int32, [1]>([1])];
39
+ tensor<int32, [1]> input_3_dilations_0 = const()[name = tensor<string, []>("input_3_dilations_0"), val = tensor<int32, [1]>([1])];
40
+ tensor<int32, []> input_3_groups_0 = const()[name = tensor<string, []>("input_3_groups_0"), val = tensor<int32, []>(1)];
41
+ tensor<fp16, [128, 129, 3]> encoder_layers_0_weight_to_fp16 = const()[name = tensor<string, []>("encoder_layers_0_weight_to_fp16"), val = tensor<fp16, [128, 129, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(132224)))];
42
+ tensor<fp16, [128]> encoder_layers_0_bias_to_fp16 = const()[name = tensor<string, []>("encoder_layers_0_bias_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(231360)))];
43
+ tensor<fp16, [1, 128, 4]> input_3_cast_fp16 = conv(bias = encoder_layers_0_bias_to_fp16, dilations = input_3_dilations_0, groups = input_3_groups_0, pad = input_3_pad_0, pad_type = input_3_pad_type_0, strides = input_3_strides_0, weight = encoder_layers_0_weight_to_fp16, x = input_1_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
44
+ tensor<fp16, [1, 128, 4]> x_5_cast_fp16 = relu(x = input_3_cast_fp16)[name = tensor<string, []>("x_5_cast_fp16")];
45
+ tensor<fp16, []> const_1_to_fp16 = const()[name = tensor<string, []>("const_1_to_fp16"), val = tensor<fp16, []>(-inf)];
46
+ tensor<fp16, []> var_40_to_fp16 = const()[name = tensor<string, []>("op_40_to_fp16"), val = tensor<fp16, []>(0x1.388p+13)];
47
+ tensor<fp16, [1, 128, 4]> clip_0_cast_fp16 = clip(alpha = const_1_to_fp16, beta = var_40_to_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("clip_0_cast_fp16")];
48
+ tensor<string, []> input_7_pad_type_0 = const()[name = tensor<string, []>("input_7_pad_type_0"), val = tensor<string, []>("custom")];
49
+ tensor<int32, [2]> input_7_pad_0 = const()[name = tensor<string, []>("input_7_pad_0"), val = tensor<int32, [2]>([1, 1])];
50
+ tensor<int32, [1]> input_7_strides_0 = const()[name = tensor<string, []>("input_7_strides_0"), val = tensor<int32, [1]>([2])];
51
+ tensor<int32, [1]> input_7_dilations_0 = const()[name = tensor<string, []>("input_7_dilations_0"), val = tensor<int32, [1]>([1])];
52
+ tensor<int32, []> input_7_groups_0 = const()[name = tensor<string, []>("input_7_groups_0"), val = tensor<int32, []>(1)];
53
+ tensor<fp16, [64, 128, 3]> encoder_layers_2_weight_to_fp16 = const()[name = tensor<string, []>("encoder_layers_2_weight_to_fp16"), val = tensor<fp16, [64, 128, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(231680)))];
54
+ tensor<fp16, [64]> encoder_layers_2_bias_to_fp16 = const()[name = tensor<string, []>("encoder_layers_2_bias_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(280896)))];
55
+ tensor<fp16, [1, 64, 2]> input_7_cast_fp16 = conv(bias = encoder_layers_2_bias_to_fp16, dilations = input_7_dilations_0, groups = input_7_groups_0, pad = input_7_pad_0, pad_type = input_7_pad_type_0, strides = input_7_strides_0, weight = encoder_layers_2_weight_to_fp16, x = clip_0_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
56
+ tensor<fp16, [1, 64, 2]> x_7_cast_fp16 = relu(x = input_7_cast_fp16)[name = tensor<string, []>("x_7_cast_fp16")];
57
+ tensor<fp16, []> const_2_to_fp16 = const()[name = tensor<string, []>("const_2_to_fp16"), val = tensor<fp16, []>(-inf)];
58
+ tensor<fp16, [1, 64, 2]> clip_1_cast_fp16 = clip(alpha = const_2_to_fp16, beta = var_40_to_fp16, x = x_7_cast_fp16)[name = tensor<string, []>("clip_1_cast_fp16")];
59
+ tensor<string, []> input_11_pad_type_0 = const()[name = tensor<string, []>("input_11_pad_type_0"), val = tensor<string, []>("custom")];
60
+ tensor<int32, [2]> input_11_pad_0 = const()[name = tensor<string, []>("input_11_pad_0"), val = tensor<int32, [2]>([1, 1])];
61
+ tensor<int32, [1]> input_11_strides_0 = const()[name = tensor<string, []>("input_11_strides_0"), val = tensor<int32, [1]>([2])];
62
+ tensor<int32, [1]> input_11_dilations_0 = const()[name = tensor<string, []>("input_11_dilations_0"), val = tensor<int32, [1]>([1])];
63
+ tensor<int32, []> input_11_groups_0 = const()[name = tensor<string, []>("input_11_groups_0"), val = tensor<int32, []>(1)];
64
+ tensor<fp16, [64, 64, 3]> encoder_layers_4_weight_to_fp16 = const()[name = tensor<string, []>("encoder_layers_4_weight_to_fp16"), val = tensor<fp16, [64, 64, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(281088)))];
65
+ tensor<fp16, [64]> encoder_layers_4_bias_to_fp16 = const()[name = tensor<string, []>("encoder_layers_4_bias_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(305728)))];
66
+ tensor<fp16, [1, 64, 1]> input_11_cast_fp16 = conv(bias = encoder_layers_4_bias_to_fp16, dilations = input_11_dilations_0, groups = input_11_groups_0, pad = input_11_pad_0, pad_type = input_11_pad_type_0, strides = input_11_strides_0, weight = encoder_layers_4_weight_to_fp16, x = clip_1_cast_fp16)[name = tensor<string, []>("input_11_cast_fp16")];
67
+ tensor<fp16, [1, 64, 1]> x_9_cast_fp16 = relu(x = input_11_cast_fp16)[name = tensor<string, []>("x_9_cast_fp16")];
68
+ tensor<fp16, []> const_3_to_fp16 = const()[name = tensor<string, []>("const_3_to_fp16"), val = tensor<fp16, []>(-inf)];
69
+ tensor<fp16, [1, 64, 1]> clip_2_cast_fp16 = clip(alpha = const_3_to_fp16, beta = var_40_to_fp16, x = x_9_cast_fp16)[name = tensor<string, []>("clip_2_cast_fp16")];
70
+ tensor<string, []> input_15_pad_type_0 = const()[name = tensor<string, []>("input_15_pad_type_0"), val = tensor<string, []>("custom")];
71
+ tensor<int32, [2]> input_15_pad_0 = const()[name = tensor<string, []>("input_15_pad_0"), val = tensor<int32, [2]>([1, 1])];
72
+ tensor<int32, [1]> input_15_strides_0 = const()[name = tensor<string, []>("input_15_strides_0"), val = tensor<int32, [1]>([1])];
73
+ tensor<int32, [1]> input_15_dilations_0 = const()[name = tensor<string, []>("input_15_dilations_0"), val = tensor<int32, [1]>([1])];
74
+ tensor<int32, []> input_15_groups_0 = const()[name = tensor<string, []>("input_15_groups_0"), val = tensor<int32, []>(1)];
75
+ tensor<fp16, [128, 64, 3]> encoder_layers_6_weight_to_fp16 = const()[name = tensor<string, []>("encoder_layers_6_weight_to_fp16"), val = tensor<fp16, [128, 64, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(305920)))];
76
+ tensor<fp16, [128]> encoder_layers_6_bias_to_fp16 = const()[name = tensor<string, []>("encoder_layers_6_bias_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(355136)))];
77
+ tensor<fp16, [1, 128, 1]> input_15_cast_fp16 = conv(bias = encoder_layers_6_bias_to_fp16, dilations = input_15_dilations_0, groups = input_15_groups_0, pad = input_15_pad_0, pad_type = input_15_pad_type_0, strides = input_15_strides_0, weight = encoder_layers_6_weight_to_fp16, x = clip_2_cast_fp16)[name = tensor<string, []>("input_15_cast_fp16")];
78
+ tensor<fp16, [1, 128, 1]> x_11_cast_fp16 = relu(x = input_15_cast_fp16)[name = tensor<string, []>("x_11_cast_fp16")];
79
+ tensor<fp16, []> const_4_to_fp16 = const()[name = tensor<string, []>("const_4_to_fp16"), val = tensor<fp16, []>(-inf)];
80
+ tensor<fp16, [1, 128, 1]> clip_3_cast_fp16 = clip(alpha = const_4_to_fp16, beta = var_40_to_fp16, x = x_11_cast_fp16)[name = tensor<string, []>("clip_3_cast_fp16")];
81
+ tensor<int32, [3]> transpose_0_perm_0 = const()[name = tensor<string, []>("transpose_0_perm_0"), val = tensor<int32, [3]>([2, 0, 1])];
82
+ tensor<string, []> transpose_0_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("transpose_0_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
83
+ tensor<int32, [1]> hx_1_axes_0 = const()[name = tensor<string, []>("hx_1_axes_0"), val = tensor<int32, [1]>([0])];
84
+ tensor<string, []> hidden_state_to_fp16_dtype_0 = const()[name = tensor<string, []>("hidden_state_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
85
+ tensor<fp16, [1, 128]> hidden_state_to_fp16 = cast(dtype = hidden_state_to_fp16_dtype_0, x = hidden_state)[name = tensor<string, []>("cast_9")];
86
+ tensor<fp16, [1, 1, 128]> hx_1_cast_fp16 = expand_dims(axes = hx_1_axes_0, x = hidden_state_to_fp16)[name = tensor<string, []>("hx_1_cast_fp16")];
87
+ tensor<int32, [1]> hx_axes_0 = const()[name = tensor<string, []>("hx_axes_0"), val = tensor<int32, [1]>([0])];
88
+ tensor<string, []> cell_state_to_fp16_dtype_0 = const()[name = tensor<string, []>("cell_state_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
89
+ tensor<fp16, [1, 128]> cell_state_to_fp16 = cast(dtype = cell_state_to_fp16_dtype_0, x = cell_state)[name = tensor<string, []>("cast_8")];
90
+ tensor<fp16, [1, 1, 128]> hx_cast_fp16 = expand_dims(axes = hx_axes_0, x = cell_state_to_fp16)[name = tensor<string, []>("hx_cast_fp16")];
91
+ tensor<fp32, [512]> concat_0 = const()[name = tensor<string, []>("concat_0"), val = tensor<fp32, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(355456)))];
92
+ tensor<fp32, [512, 128]> concat_1 = const()[name = tensor<string, []>("concat_1"), val = tensor<fp32, [512, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(357568)))];
93
+ tensor<fp32, [512, 128]> concat_2 = const()[name = tensor<string, []>("concat_2"), val = tensor<fp32, [512, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(619776)))];
94
+ tensor<int32, [1]> lstm_out_batch_first_lstm_h0_squeeze_axes_0 = const()[name = tensor<string, []>("lstm_out_batch_first_lstm_h0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
95
+ tensor<fp16, [1, 128]> lstm_out_batch_first_lstm_h0_squeeze_cast_fp16 = squeeze(axes = lstm_out_batch_first_lstm_h0_squeeze_axes_0, x = hx_1_cast_fp16)[name = tensor<string, []>("lstm_out_batch_first_lstm_h0_squeeze_cast_fp16")];
96
+ tensor<string, []> lstm_out_batch_first_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("lstm_out_batch_first_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
97
+ tensor<int32, [1]> lstm_out_batch_first_lstm_c0_squeeze_axes_0 = const()[name = tensor<string, []>("lstm_out_batch_first_lstm_c0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
98
+ tensor<fp16, [1, 128]> lstm_out_batch_first_lstm_c0_squeeze_cast_fp16 = squeeze(axes = lstm_out_batch_first_lstm_c0_squeeze_axes_0, x = hx_cast_fp16)[name = tensor<string, []>("lstm_out_batch_first_lstm_c0_squeeze_cast_fp16")];
99
+ tensor<string, []> lstm_out_batch_first_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("lstm_out_batch_first_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
100
+ tensor<string, []> lstm_out_batch_first_direction_0 = const()[name = tensor<string, []>("lstm_out_batch_first_direction_0"), val = tensor<string, []>("forward")];
101
+ tensor<bool, []> lstm_out_batch_first_output_sequence_0 = const()[name = tensor<string, []>("lstm_out_batch_first_output_sequence_0"), val = tensor<bool, []>(true)];
102
+ tensor<string, []> lstm_out_batch_first_recurrent_activation_0 = const()[name = tensor<string, []>("lstm_out_batch_first_recurrent_activation_0"), val = tensor<string, []>("sigmoid")];
103
+ tensor<string, []> lstm_out_batch_first_cell_activation_0 = const()[name = tensor<string, []>("lstm_out_batch_first_cell_activation_0"), val = tensor<string, []>("tanh")];
104
+ tensor<string, []> lstm_out_batch_first_activation_0 = const()[name = tensor<string, []>("lstm_out_batch_first_activation_0"), val = tensor<string, []>("tanh")];
105
+ tensor<fp32, [1, 128]> lstm_out_batch_first_lstm_c0_squeeze_cast_fp16_to_fp32 = cast(dtype = lstm_out_batch_first_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0, x = lstm_out_batch_first_lstm_c0_squeeze_cast_fp16)[name = tensor<string, []>("cast_6")];
106
+ tensor<fp32, [1, 128]> lstm_out_batch_first_lstm_h0_squeeze_cast_fp16_to_fp32 = cast(dtype = lstm_out_batch_first_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0, x = lstm_out_batch_first_lstm_h0_squeeze_cast_fp16)[name = tensor<string, []>("cast_7")];
107
+ tensor<fp16, [1, 1, 128]> transpose_0_cast_fp16 = transpose(perm = transpose_0_perm_0, x = clip_3_cast_fp16)[name = tensor<string, []>("transpose_3")];
108
+ tensor<fp32, [1, 1, 128]> transpose_0_cast_fp16_to_fp32 = cast(dtype = transpose_0_cast_fp16_to_fp32_dtype_0, x = transpose_0_cast_fp16)[name = tensor<string, []>("cast_10")];
109
+ tensor<fp32, [1, 1, 128]> lstm_out_batch_first_0, tensor<fp32, [1, 128]> lstm_out_batch_first_1, tensor<fp32, [1, 128]> lstm_out_batch_first_2 = lstm(activation = lstm_out_batch_first_activation_0, bias = concat_0, cell_activation = lstm_out_batch_first_cell_activation_0, direction = lstm_out_batch_first_direction_0, initial_c = lstm_out_batch_first_lstm_c0_squeeze_cast_fp16_to_fp32, initial_h = lstm_out_batch_first_lstm_h0_squeeze_cast_fp16_to_fp32, output_sequence = lstm_out_batch_first_output_sequence_0, recurrent_activation = lstm_out_batch_first_recurrent_activation_0, weight_hh = concat_2, weight_ih = concat_1, x = transpose_0_cast_fp16_to_fp32)[name = tensor<string, []>("lstm_out_batch_first")];
110
+ tensor<int32, [3]> transpose_1_perm_0 = const()[name = tensor<string, []>("transpose_1_perm_0"), val = tensor<int32, [3]>([1, 2, 0])];
111
+ tensor<string, []> lstm_out_batch_first_0_to_fp16_dtype_0 = const()[name = tensor<string, []>("lstm_out_batch_first_0_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
112
+ tensor<int32, [1]> hn_axes_0 = const()[name = tensor<string, []>("hn_axes_0"), val = tensor<int32, [1]>([0])];
113
+ tensor<string, []> lstm_out_batch_first_1_to_fp16_dtype_0 = const()[name = tensor<string, []>("lstm_out_batch_first_1_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
114
+ tensor<fp16, [1, 128]> lstm_out_batch_first_1_to_fp16 = cast(dtype = lstm_out_batch_first_1_to_fp16_dtype_0, x = lstm_out_batch_first_1)[name = tensor<string, []>("cast_4")];
115
+ tensor<fp16, [1, 1, 128]> hn_cast_fp16 = expand_dims(axes = hn_axes_0, x = lstm_out_batch_first_1_to_fp16)[name = tensor<string, []>("hn_cast_fp16")];
116
+ tensor<int32, [1]> cn_axes_0 = const()[name = tensor<string, []>("cn_axes_0"), val = tensor<int32, [1]>([0])];
117
+ tensor<string, []> lstm_out_batch_first_2_to_fp16_dtype_0 = const()[name = tensor<string, []>("lstm_out_batch_first_2_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
118
+ tensor<fp16, [1, 128]> lstm_out_batch_first_2_to_fp16 = cast(dtype = lstm_out_batch_first_2_to_fp16_dtype_0, x = lstm_out_batch_first_2)[name = tensor<string, []>("cast_3")];
119
+ tensor<fp16, [1, 1, 128]> cn_cast_fp16 = expand_dims(axes = cn_axes_0, x = lstm_out_batch_first_2_to_fp16)[name = tensor<string, []>("cn_cast_fp16")];
120
+ tensor<fp16, [1, 1, 128]> lstm_out_batch_first_0_to_fp16 = cast(dtype = lstm_out_batch_first_0_to_fp16_dtype_0, x = lstm_out_batch_first_0)[name = tensor<string, []>("cast_5")];
121
+ tensor<fp16, [1, 128, 1]> transpose_1_cast_fp16 = transpose(perm = transpose_1_perm_0, x = lstm_out_batch_first_0_to_fp16)[name = tensor<string, []>("transpose_2")];
122
+ tensor<fp16, [1, 128, 1]> input_23_cast_fp16 = relu(x = transpose_1_cast_fp16)[name = tensor<string, []>("input_23_cast_fp16")];
123
+ tensor<string, []> input_pad_type_0 = const()[name = tensor<string, []>("input_pad_type_0"), val = tensor<string, []>("valid")];
124
+ tensor<int32, [1]> input_strides_0 = const()[name = tensor<string, []>("input_strides_0"), val = tensor<int32, [1]>([1])];
125
+ tensor<int32, [2]> input_pad_0 = const()[name = tensor<string, []>("input_pad_0"), val = tensor<int32, [2]>([0, 0])];
126
+ tensor<int32, [1]> input_dilations_0 = const()[name = tensor<string, []>("input_dilations_0"), val = tensor<int32, [1]>([1])];
127
+ tensor<int32, []> input_groups_0 = const()[name = tensor<string, []>("input_groups_0"), val = tensor<int32, []>(1)];
128
+ tensor<fp16, [1, 128, 1]> decoder_final_conv_weight_to_fp16 = const()[name = tensor<string, []>("decoder_final_conv_weight_to_fp16"), val = tensor<fp16, [1, 128, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(881984)))];
129
+ tensor<fp16, [1]> decoder_final_conv_bias_to_fp16 = const()[name = tensor<string, []>("decoder_final_conv_bias_to_fp16"), val = tensor<fp16, [1]>([0x1.dfp-5])];
130
+ tensor<fp16, [1, 1, 1]> input_cast_fp16 = conv(bias = decoder_final_conv_bias_to_fp16, dilations = input_dilations_0, groups = input_groups_0, pad = input_pad_0, pad_type = input_pad_type_0, strides = input_strides_0, weight = decoder_final_conv_weight_to_fp16, x = input_23_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
131
+ tensor<fp16, [1, 1, 1]> var_124_cast_fp16 = sigmoid(x = input_cast_fp16)[name = tensor<string, []>("op_124_cast_fp16")];
132
+ tensor<string, []> var_124_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_124_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
133
+ tensor<int32, [1]> var_125_axes_0 = const()[name = tensor<string, []>("op_125_axes_0"), val = tensor<int32, [1]>([0])];
134
+ tensor<fp16, [1, 128]> var_125_cast_fp16 = squeeze(axes = var_125_axes_0, x = hn_cast_fp16)[name = tensor<string, []>("op_125_cast_fp16")];
135
+ tensor<string, []> var_125_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_125_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
136
+ tensor<int32, [1]> var_126_axes_0 = const()[name = tensor<string, []>("op_126_axes_0"), val = tensor<int32, [1]>([0])];
137
+ tensor<fp16, [1, 128]> var_126_cast_fp16 = squeeze(axes = var_126_axes_0, x = cn_cast_fp16)[name = tensor<string, []>("op_126_cast_fp16")];
138
+ tensor<string, []> var_126_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_126_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
139
+ tensor<fp32, [1, 128]> new_cell_state = cast(dtype = var_126_cast_fp16_to_fp32_dtype_0, x = var_126_cast_fp16)[name = tensor<string, []>("cast_0")];
140
+ tensor<fp32, [1, 128]> new_hidden_state = cast(dtype = var_125_cast_fp16_to_fp32_dtype_0, x = var_125_cast_fp16)[name = tensor<string, []>("cast_1")];
141
+ tensor<fp32, [1, 1, 1]> vad_output = cast(dtype = var_124_cast_fp16_to_fp32_dtype_0, x = var_124_cast_fp16)[name = tensor<string, []>("cast_2")];
142
+ } -> (vad_output, new_hidden_state, new_cell_state);
143
+ }
silero-vad-unified-v6.0.0.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:853cf34740d3f5061f977ebe2976f7c921b064261c9c4753b3a1196f2dba42b4
3
+ size 882304
silero-vad-unified-v6.0.0.mlpackage/Data/com.apple.CoreML/model.mlmodel ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:532a63e7db357d739761635285c295b6d440a07d081951e914351efc1f6fd8d9
3
+ size 22122
silero-vad-unified-v6.0.0.mlpackage/Data/com.apple.CoreML/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:853cf34740d3f5061f977ebe2976f7c921b064261c9c4753b3a1196f2dba42b4
3
+ size 882304
silero-vad-unified-v6.0.0.mlpackage/Manifest.json ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "fileFormatVersion": "1.0.0",
3
+ "itemInfoEntries": {
4
+ "18F41392-7D2B-413D-B76B-A0C5C5514516": {
5
+ "author": "com.apple.CoreML",
6
+ "description": "CoreML Model Specification",
7
+ "name": "model.mlmodel",
8
+ "path": "com.apple.CoreML/model.mlmodel"
9
+ },
10
+ "40D9B5FB-A936-4C45-9B1C-E7E6177C06D0": {
11
+ "author": "com.apple.CoreML",
12
+ "description": "CoreML Model Weights",
13
+ "name": "weights",
14
+ "path": "com.apple.CoreML/weights"
15
+ }
16
+ },
17
+ "rootModelIdentifier": "18F41392-7D2B-413D-B76B-A0C5C5514516"
18
+ }
silero_vad.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:35c6d0bd3f8dd431fed72221005853ffe3621af1b550951093c41d0b918d210e
3
+ size 243
silero_vad.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ca7f6a0ab7a349477fed1864e6cf7cb6adf611f017c0c5f0218c694d25e1434a
3
+ size 422
silero_vad.mlmodelc/metadata.json ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "shortDescription" : "VAD with SE modules trained on MUSAN (86.47% accuracy)",
4
+ "metadataOutputVersion" : "3.0",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float32",
10
+ "formattedType" : "MultiArray (Float32 1 × 1)",
11
+ "shortDescription" : "",
12
+ "shape" : "[1, 1]",
13
+ "name" : "vad_probability",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "version" : "2.0",
18
+ "modelParameters" : [
19
+
20
+ ],
21
+ "author" : "Silero VAD with Trained SE Modules",
22
+ "specificationVersion" : 6,
23
+ "storagePrecision" : "Mixed (Float16, Float32)",
24
+ "mlProgramOperationTypeHistogram" : {
25
+ "Concat" : 4,
26
+ "Lstm" : 1,
27
+ "Linear" : 14,
28
+ "SliceByIndex" : 3,
29
+ "LayerNorm" : 1,
30
+ "Pow" : 6,
31
+ "Stack" : 1,
32
+ "Transpose" : 3,
33
+ "Relu" : 9,
34
+ "ReduceMean" : 5,
35
+ "Cast" : 4,
36
+ "Reshape" : 8,
37
+ "Add" : 6,
38
+ "Sqrt" : 3,
39
+ "Sigmoid" : 5,
40
+ "Mul" : 5,
41
+ "Conv" : 5,
42
+ "Squeeze" : 1
43
+ },
44
+ "computePrecision" : "Mixed (Float16, Float32, Int32)",
45
+ "stateSchema" : [
46
+
47
+ ],
48
+ "isUpdatable" : "0",
49
+ "availability" : {
50
+ "macOS" : "12.0",
51
+ "tvOS" : "15.0",
52
+ "visionOS" : "1.0",
53
+ "watchOS" : "8.0",
54
+ "iOS" : "15.0",
55
+ "macCatalyst" : "15.0"
56
+ },
57
+ "modelType" : {
58
+ "name" : "MLModelType_mlProgram"
59
+ },
60
+ "inputSchema" : [
61
+ {
62
+ "hasShapeFlexibility" : "0",
63
+ "isOptional" : "0",
64
+ "dataType" : "Float32",
65
+ "formattedType" : "MultiArray (Float32 1 × 512)",
66
+ "shortDescription" : "",
67
+ "shape" : "[1, 512]",
68
+ "name" : "audio_chunk",
69
+ "type" : "MultiArray"
70
+ }
71
+ ],
72
+ "userDefinedMetadata" : {
73
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
74
+ "com.github.apple.coremltools.source" : "torch==2.5.0",
75
+ "com.github.apple.coremltools.version" : "8.3.0"
76
+ },
77
+ "generatedClassName" : "silero_vad_se_trained",
78
+ "method" : "predict"
79
+ }
80
+ ]
silero_vad.mlmodelc/model.mil ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ program(1.0)
2
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3405.2.1"}, {"coremlc-version", "3404.23.1"}, {"coremltools-component-torch", "2.5.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.3.0"}})]
3
+ {
4
+ func main<ios15>(tensor<fp32, [1, 512]> audio_chunk) {
5
+ tensor<int32, [2]> frame_1_begin_0 = const()[name = tensor<string, []>("frame_1_begin_0"), val = tensor<int32, [2]>([0, 0])];
6
+ tensor<int32, [2]> frame_1_end_0 = const()[name = tensor<string, []>("frame_1_end_0"), val = tensor<int32, [2]>([1, 256])];
7
+ tensor<bool, [2]> frame_1_end_mask_0 = const()[name = tensor<string, []>("frame_1_end_mask_0"), val = tensor<bool, [2]>([true, false])];
8
+ tensor<string, []> audio_chunk_to_fp16_dtype_0 = const()[name = tensor<string, []>("audio_chunk_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
9
+ tensor<fp16, [1, 512]> audio_chunk_to_fp16 = cast(dtype = audio_chunk_to_fp16_dtype_0, x = audio_chunk)[name = tensor<string, []>("cast_11")];
10
+ tensor<fp16, [1, 256]> frame_1_cast_fp16 = slice_by_index(begin = frame_1_begin_0, end = frame_1_end_0, end_mask = frame_1_end_mask_0, x = audio_chunk_to_fp16)[name = tensor<string, []>("frame_1_cast_fp16")];
11
+ tensor<int32, [2]> frame_3_begin_0 = const()[name = tensor<string, []>("frame_3_begin_0"), val = tensor<int32, [2]>([0, 128])];
12
+ tensor<int32, [2]> frame_3_end_0 = const()[name = tensor<string, []>("frame_3_end_0"), val = tensor<int32, [2]>([1, 384])];
13
+ tensor<bool, [2]> frame_3_end_mask_0 = const()[name = tensor<string, []>("frame_3_end_mask_0"), val = tensor<bool, [2]>([true, false])];
14
+ tensor<fp16, [1, 256]> frame_3_cast_fp16 = slice_by_index(begin = frame_3_begin_0, end = frame_3_end_0, end_mask = frame_3_end_mask_0, x = audio_chunk_to_fp16)[name = tensor<string, []>("frame_3_cast_fp16")];
15
+ tensor<int32, [2]> frame_begin_0 = const()[name = tensor<string, []>("frame_begin_0"), val = tensor<int32, [2]>([0, 256])];
16
+ tensor<int32, [2]> frame_end_0 = const()[name = tensor<string, []>("frame_end_0"), val = tensor<int32, [2]>([1, 1])];
17
+ tensor<bool, [2]> frame_end_mask_0 = const()[name = tensor<string, []>("frame_end_mask_0"), val = tensor<bool, [2]>([true, true])];
18
+ tensor<fp16, [1, 256]> frame_cast_fp16 = slice_by_index(begin = frame_begin_0, end = frame_end_0, end_mask = frame_end_mask_0, x = audio_chunk_to_fp16)[name = tensor<string, []>("frame_cast_fp16")];
19
+ tensor<fp16, [129, 256]> var_26_to_fp16 = const()[name = tensor<string, []>("op_26_to_fp16"), val = tensor<fp16, [129, 256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
20
+ tensor<fp16, [129]> var_38_bias_0_to_fp16 = const()[name = tensor<string, []>("op_38_bias_0_to_fp16"), val = tensor<fp16, [129]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(66176)))];
21
+ tensor<fp16, [1, 129]> var_38_cast_fp16 = linear(bias = var_38_bias_0_to_fp16, weight = var_26_to_fp16, x = frame_1_cast_fp16)[name = tensor<string, []>("op_38_cast_fp16")];
22
+ tensor<fp16, [129, 256]> var_29_to_fp16 = const()[name = tensor<string, []>("op_29_to_fp16"), val = tensor<fp16, [129, 256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(66560)))];
23
+ tensor<fp16, [1, 129]> var_40_cast_fp16 = linear(bias = var_38_bias_0_to_fp16, weight = var_29_to_fp16, x = frame_1_cast_fp16)[name = tensor<string, []>("op_40_cast_fp16")];
24
+ tensor<fp16, []> var_20_promoted_to_fp16 = const()[name = tensor<string, []>("op_20_promoted_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
25
+ tensor<fp16, [1, 129]> var_41_cast_fp16 = pow(x = var_38_cast_fp16, y = var_20_promoted_to_fp16)[name = tensor<string, []>("op_41_cast_fp16")];
26
+ tensor<fp16, []> var_20_promoted_1_to_fp16 = const()[name = tensor<string, []>("op_20_promoted_1_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
27
+ tensor<fp16, [1, 129]> var_42_cast_fp16 = pow(x = var_40_cast_fp16, y = var_20_promoted_1_to_fp16)[name = tensor<string, []>("op_42_cast_fp16")];
28
+ tensor<fp16, [1, 129]> var_43_cast_fp16 = add(x = var_41_cast_fp16, y = var_42_cast_fp16)[name = tensor<string, []>("op_43_cast_fp16")];
29
+ tensor<fp16, []> var_44_to_fp16 = const()[name = tensor<string, []>("op_44_to_fp16"), val = tensor<fp16, []>(0x1.1p-20)];
30
+ tensor<fp16, [1, 129]> var_45_cast_fp16 = add(x = var_43_cast_fp16, y = var_44_to_fp16)[name = tensor<string, []>("op_45_cast_fp16")];
31
+ tensor<fp16, [1, 129]> var_46_cast_fp16 = sqrt(x = var_45_cast_fp16)[name = tensor<string, []>("op_46_cast_fp16")];
32
+ tensor<fp16, [1, 129]> var_48_cast_fp16 = linear(bias = var_38_bias_0_to_fp16, weight = var_26_to_fp16, x = frame_3_cast_fp16)[name = tensor<string, []>("op_48_cast_fp16")];
33
+ tensor<fp16, [1, 129]> var_50_cast_fp16 = linear(bias = var_38_bias_0_to_fp16, weight = var_29_to_fp16, x = frame_3_cast_fp16)[name = tensor<string, []>("op_50_cast_fp16")];
34
+ tensor<fp16, []> var_20_promoted_2_to_fp16 = const()[name = tensor<string, []>("op_20_promoted_2_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
35
+ tensor<fp16, [1, 129]> var_51_cast_fp16 = pow(x = var_48_cast_fp16, y = var_20_promoted_2_to_fp16)[name = tensor<string, []>("op_51_cast_fp16")];
36
+ tensor<fp16, []> var_20_promoted_3_to_fp16 = const()[name = tensor<string, []>("op_20_promoted_3_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
37
+ tensor<fp16, [1, 129]> var_52_cast_fp16 = pow(x = var_50_cast_fp16, y = var_20_promoted_3_to_fp16)[name = tensor<string, []>("op_52_cast_fp16")];
38
+ tensor<fp16, [1, 129]> var_53_cast_fp16 = add(x = var_51_cast_fp16, y = var_52_cast_fp16)[name = tensor<string, []>("op_53_cast_fp16")];
39
+ tensor<fp16, []> var_54_to_fp16 = const()[name = tensor<string, []>("op_54_to_fp16"), val = tensor<fp16, []>(0x1.1p-20)];
40
+ tensor<fp16, [1, 129]> var_55_cast_fp16 = add(x = var_53_cast_fp16, y = var_54_to_fp16)[name = tensor<string, []>("op_55_cast_fp16")];
41
+ tensor<fp16, [1, 129]> var_56_cast_fp16 = sqrt(x = var_55_cast_fp16)[name = tensor<string, []>("op_56_cast_fp16")];
42
+ tensor<fp16, [1, 129]> var_58_cast_fp16 = linear(bias = var_38_bias_0_to_fp16, weight = var_26_to_fp16, x = frame_cast_fp16)[name = tensor<string, []>("op_58_cast_fp16")];
43
+ tensor<fp16, [1, 129]> var_60_cast_fp16 = linear(bias = var_38_bias_0_to_fp16, weight = var_29_to_fp16, x = frame_cast_fp16)[name = tensor<string, []>("op_60_cast_fp16")];
44
+ tensor<fp16, []> var_20_promoted_4_to_fp16 = const()[name = tensor<string, []>("op_20_promoted_4_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
45
+ tensor<fp16, [1, 129]> var_61_cast_fp16 = pow(x = var_58_cast_fp16, y = var_20_promoted_4_to_fp16)[name = tensor<string, []>("op_61_cast_fp16")];
46
+ tensor<fp16, []> var_20_promoted_5_to_fp16 = const()[name = tensor<string, []>("op_20_promoted_5_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
47
+ tensor<fp16, [1, 129]> var_62_cast_fp16 = pow(x = var_60_cast_fp16, y = var_20_promoted_5_to_fp16)[name = tensor<string, []>("op_62_cast_fp16")];
48
+ tensor<fp16, [1, 129]> var_63_cast_fp16 = add(x = var_61_cast_fp16, y = var_62_cast_fp16)[name = tensor<string, []>("op_63_cast_fp16")];
49
+ tensor<fp16, []> var_64_to_fp16 = const()[name = tensor<string, []>("op_64_to_fp16"), val = tensor<fp16, []>(0x1.1p-20)];
50
+ tensor<fp16, [1, 129]> var_65_cast_fp16 = add(x = var_63_cast_fp16, y = var_64_to_fp16)[name = tensor<string, []>("op_65_cast_fp16")];
51
+ tensor<fp16, [1, 129]> magnitude_cast_fp16 = sqrt(x = var_65_cast_fp16)[name = tensor<string, []>("magnitude_cast_fp16")];
52
+ tensor<int32, []> input_1_axis_0 = const()[name = tensor<string, []>("input_1_axis_0"), val = tensor<int32, []>(2)];
53
+ tensor<fp16, [1, 129, 3]> input_1_cast_fp16 = stack(axis = input_1_axis_0, values = (var_46_cast_fp16, var_56_cast_fp16, magnitude_cast_fp16))[name = tensor<string, []>("input_1_cast_fp16")];
54
+ tensor<string, []> x_1_pad_type_0 = const()[name = tensor<string, []>("x_1_pad_type_0"), val = tensor<string, []>("custom")];
55
+ tensor<int32, [2]> x_1_pad_0 = const()[name = tensor<string, []>("x_1_pad_0"), val = tensor<int32, [2]>([1, 1])];
56
+ tensor<int32, [1]> x_1_strides_0 = const()[name = tensor<string, []>("x_1_strides_0"), val = tensor<int32, [1]>([1])];
57
+ tensor<int32, [1]> x_1_dilations_0 = const()[name = tensor<string, []>("x_1_dilations_0"), val = tensor<int32, [1]>([1])];
58
+ tensor<int32, []> x_1_groups_0 = const()[name = tensor<string, []>("x_1_groups_0"), val = tensor<int32, []>(1)];
59
+ tensor<fp16, [128, 129, 3]> vad_encoder_encoder_0_conv_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_0_conv_weight_to_fp16"), val = tensor<fp16, [128, 129, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(132672)))];
60
+ tensor<fp16, [128]> vad_encoder_encoder_0_conv_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_0_conv_bias_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(231808)))];
61
+ tensor<fp16, [1, 128, 3]> x_1_cast_fp16 = conv(bias = vad_encoder_encoder_0_conv_bias_to_fp16, dilations = x_1_dilations_0, groups = x_1_groups_0, pad = x_1_pad_0, pad_type = x_1_pad_type_0, strides = x_1_strides_0, weight = vad_encoder_encoder_0_conv_weight_to_fp16, x = input_1_cast_fp16)[name = tensor<string, []>("x_1_cast_fp16")];
62
+ tensor<int32, [1]> reduce_mean_0_axes_0 = const()[name = tensor<string, []>("reduce_mean_0_axes_0"), val = tensor<int32, [1]>([-1])];
63
+ tensor<bool, []> reduce_mean_0_keep_dims_0 = const()[name = tensor<string, []>("reduce_mean_0_keep_dims_0"), val = tensor<bool, []>(true)];
64
+ tensor<fp16, [1, 128, 1]> reduce_mean_0_cast_fp16 = reduce_mean(axes = reduce_mean_0_axes_0, keep_dims = reduce_mean_0_keep_dims_0, x = x_1_cast_fp16)[name = tensor<string, []>("reduce_mean_0_cast_fp16")];
65
+ tensor<int32, []> concat_0_axis_0 = const()[name = tensor<string, []>("concat_0_axis_0"), val = tensor<int32, []>(-1)];
66
+ tensor<bool, []> concat_0_interleave_0 = const()[name = tensor<string, []>("concat_0_interleave_0"), val = tensor<bool, []>(false)];
67
+ tensor<fp16, [1, 128, 1]> concat_0_cast_fp16 = concat(axis = concat_0_axis_0, interleave = concat_0_interleave_0, values = reduce_mean_0_cast_fp16)[name = tensor<string, []>("concat_0_cast_fp16")];
68
+ tensor<int32, [2]> var_92 = const()[name = tensor<string, []>("op_92"), val = tensor<int32, [2]>([1, 128])];
69
+ tensor<fp16, [1, 128]> input_3_cast_fp16 = reshape(shape = var_92, x = concat_0_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
70
+ tensor<fp16, [16, 128]> vad_encoder_encoder_0_se_fc1_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_0_se_fc1_weight_to_fp16"), val = tensor<fp16, [16, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(232128)))];
71
+ tensor<fp16, [16]> vad_encoder_encoder_0_se_fc1_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_0_se_fc1_bias_to_fp16"), val = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(236288)))];
72
+ tensor<fp16, [1, 16]> linear_0_cast_fp16 = linear(bias = vad_encoder_encoder_0_se_fc1_bias_to_fp16, weight = vad_encoder_encoder_0_se_fc1_weight_to_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("linear_0_cast_fp16")];
73
+ tensor<fp16, [1, 16]> input_7_cast_fp16 = relu(x = linear_0_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
74
+ tensor<fp16, [128, 16]> vad_encoder_encoder_0_se_fc2_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_0_se_fc2_weight_to_fp16"), val = tensor<fp16, [128, 16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(236416)))];
75
+ tensor<fp16, [128]> vad_encoder_encoder_0_se_fc2_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_0_se_fc2_bias_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(240576)))];
76
+ tensor<fp16, [1, 128]> linear_1_cast_fp16 = linear(bias = vad_encoder_encoder_0_se_fc2_bias_to_fp16, weight = vad_encoder_encoder_0_se_fc2_weight_to_fp16, x = input_7_cast_fp16)[name = tensor<string, []>("linear_1_cast_fp16")];
77
+ tensor<fp16, [1, 128]> y_1_cast_fp16 = sigmoid(x = linear_1_cast_fp16)[name = tensor<string, []>("y_1_cast_fp16")];
78
+ tensor<int32, [3]> var_102 = const()[name = tensor<string, []>("op_102"), val = tensor<int32, [3]>([1, 128, 1])];
79
+ tensor<fp16, [1, 128, 1]> y_3_cast_fp16 = reshape(shape = var_102, x = y_1_cast_fp16)[name = tensor<string, []>("y_3_cast_fp16")];
80
+ tensor<fp16, [1, 128, 3]> input_11_cast_fp16 = mul(x = x_1_cast_fp16, y = y_3_cast_fp16)[name = tensor<string, []>("input_11_cast_fp16")];
81
+ tensor<fp16, [1, 128, 3]> input_13_cast_fp16 = relu(x = input_11_cast_fp16)[name = tensor<string, []>("input_13_cast_fp16")];
82
+ tensor<string, []> x_3_pad_type_0 = const()[name = tensor<string, []>("x_3_pad_type_0"), val = tensor<string, []>("custom")];
83
+ tensor<int32, [2]> x_3_pad_0 = const()[name = tensor<string, []>("x_3_pad_0"), val = tensor<int32, [2]>([1, 1])];
84
+ tensor<int32, [1]> x_3_strides_0 = const()[name = tensor<string, []>("x_3_strides_0"), val = tensor<int32, [1]>([1])];
85
+ tensor<int32, [1]> x_3_dilations_0 = const()[name = tensor<string, []>("x_3_dilations_0"), val = tensor<int32, [1]>([1])];
86
+ tensor<int32, []> x_3_groups_0 = const()[name = tensor<string, []>("x_3_groups_0"), val = tensor<int32, []>(1)];
87
+ tensor<fp16, [64, 128, 3]> vad_encoder_encoder_1_conv_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_1_conv_weight_to_fp16"), val = tensor<fp16, [64, 128, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(240896)))];
88
+ tensor<fp16, [64]> vad_encoder_encoder_1_conv_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_1_conv_bias_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(290112)))];
89
+ tensor<fp16, [1, 64, 3]> x_3_cast_fp16 = conv(bias = vad_encoder_encoder_1_conv_bias_to_fp16, dilations = x_3_dilations_0, groups = x_3_groups_0, pad = x_3_pad_0, pad_type = x_3_pad_type_0, strides = x_3_strides_0, weight = vad_encoder_encoder_1_conv_weight_to_fp16, x = input_13_cast_fp16)[name = tensor<string, []>("x_3_cast_fp16")];
90
+ tensor<int32, [1]> reduce_mean_1_axes_0 = const()[name = tensor<string, []>("reduce_mean_1_axes_0"), val = tensor<int32, [1]>([-1])];
91
+ tensor<bool, []> reduce_mean_1_keep_dims_0 = const()[name = tensor<string, []>("reduce_mean_1_keep_dims_0"), val = tensor<bool, []>(true)];
92
+ tensor<fp16, [1, 64, 1]> reduce_mean_1_cast_fp16 = reduce_mean(axes = reduce_mean_1_axes_0, keep_dims = reduce_mean_1_keep_dims_0, x = x_3_cast_fp16)[name = tensor<string, []>("reduce_mean_1_cast_fp16")];
93
+ tensor<int32, []> concat_1_axis_0 = const()[name = tensor<string, []>("concat_1_axis_0"), val = tensor<int32, []>(-1)];
94
+ tensor<bool, []> concat_1_interleave_0 = const()[name = tensor<string, []>("concat_1_interleave_0"), val = tensor<bool, []>(false)];
95
+ tensor<fp16, [1, 64, 1]> concat_1_cast_fp16 = concat(axis = concat_1_axis_0, interleave = concat_1_interleave_0, values = reduce_mean_1_cast_fp16)[name = tensor<string, []>("concat_1_cast_fp16")];
96
+ tensor<int32, [2]> var_121 = const()[name = tensor<string, []>("op_121"), val = tensor<int32, [2]>([1, 64])];
97
+ tensor<fp16, [1, 64]> input_15_cast_fp16 = reshape(shape = var_121, x = concat_1_cast_fp16)[name = tensor<string, []>("input_15_cast_fp16")];
98
+ tensor<fp16, [8, 64]> vad_encoder_encoder_1_se_fc1_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_1_se_fc1_weight_to_fp16"), val = tensor<fp16, [8, 64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(290304)))];
99
+ tensor<fp16, [8]> vad_encoder_encoder_1_se_fc1_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_1_se_fc1_bias_to_fp16"), val = tensor<fp16, [8]>([0x1.00cp-9, 0x1.6dcp-5, 0x1.82cp-5, 0x1.054p-5, 0x1.8a4p-4, 0x1.f88p-7, 0x1.234p-5, 0x1.514p-5])];
100
+ tensor<fp16, [1, 8]> linear_2_cast_fp16 = linear(bias = vad_encoder_encoder_1_se_fc1_bias_to_fp16, weight = vad_encoder_encoder_1_se_fc1_weight_to_fp16, x = input_15_cast_fp16)[name = tensor<string, []>("linear_2_cast_fp16")];
101
+ tensor<fp16, [1, 8]> input_19_cast_fp16 = relu(x = linear_2_cast_fp16)[name = tensor<string, []>("input_19_cast_fp16")];
102
+ tensor<fp16, [64, 8]> vad_encoder_encoder_1_se_fc2_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_1_se_fc2_weight_to_fp16"), val = tensor<fp16, [64, 8]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(291392)))];
103
+ tensor<fp16, [64]> vad_encoder_encoder_1_se_fc2_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_1_se_fc2_bias_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(292480)))];
104
+ tensor<fp16, [1, 64]> linear_3_cast_fp16 = linear(bias = vad_encoder_encoder_1_se_fc2_bias_to_fp16, weight = vad_encoder_encoder_1_se_fc2_weight_to_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("linear_3_cast_fp16")];
105
+ tensor<fp16, [1, 64]> y_5_cast_fp16 = sigmoid(x = linear_3_cast_fp16)[name = tensor<string, []>("y_5_cast_fp16")];
106
+ tensor<int32, [3]> var_131 = const()[name = tensor<string, []>("op_131"), val = tensor<int32, [3]>([1, 64, 1])];
107
+ tensor<fp16, [1, 64, 1]> y_7_cast_fp16 = reshape(shape = var_131, x = y_5_cast_fp16)[name = tensor<string, []>("y_7_cast_fp16")];
108
+ tensor<fp16, [1, 64, 3]> input_23_cast_fp16 = mul(x = x_3_cast_fp16, y = y_7_cast_fp16)[name = tensor<string, []>("input_23_cast_fp16")];
109
+ tensor<fp16, [1, 64, 3]> input_25_cast_fp16 = relu(x = input_23_cast_fp16)[name = tensor<string, []>("input_25_cast_fp16")];
110
+ tensor<string, []> x_5_pad_type_0 = const()[name = tensor<string, []>("x_5_pad_type_0"), val = tensor<string, []>("custom")];
111
+ tensor<int32, [2]> x_5_pad_0 = const()[name = tensor<string, []>("x_5_pad_0"), val = tensor<int32, [2]>([1, 1])];
112
+ tensor<int32, [1]> x_5_strides_0 = const()[name = tensor<string, []>("x_5_strides_0"), val = tensor<int32, [1]>([1])];
113
+ tensor<int32, [1]> x_5_dilations_0 = const()[name = tensor<string, []>("x_5_dilations_0"), val = tensor<int32, [1]>([1])];
114
+ tensor<int32, []> x_5_groups_0 = const()[name = tensor<string, []>("x_5_groups_0"), val = tensor<int32, []>(1)];
115
+ tensor<fp16, [64, 64, 3]> vad_encoder_encoder_2_conv_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_2_conv_weight_to_fp16"), val = tensor<fp16, [64, 64, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(292672)))];
116
+ tensor<fp16, [64]> vad_encoder_encoder_2_conv_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_2_conv_bias_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(317312)))];
117
+ tensor<fp16, [1, 64, 3]> x_5_cast_fp16 = conv(bias = vad_encoder_encoder_2_conv_bias_to_fp16, dilations = x_5_dilations_0, groups = x_5_groups_0, pad = x_5_pad_0, pad_type = x_5_pad_type_0, strides = x_5_strides_0, weight = vad_encoder_encoder_2_conv_weight_to_fp16, x = input_25_cast_fp16)[name = tensor<string, []>("x_5_cast_fp16")];
118
+ tensor<int32, [1]> reduce_mean_2_axes_0 = const()[name = tensor<string, []>("reduce_mean_2_axes_0"), val = tensor<int32, [1]>([-1])];
119
+ tensor<bool, []> reduce_mean_2_keep_dims_0 = const()[name = tensor<string, []>("reduce_mean_2_keep_dims_0"), val = tensor<bool, []>(true)];
120
+ tensor<fp16, [1, 64, 1]> reduce_mean_2_cast_fp16 = reduce_mean(axes = reduce_mean_2_axes_0, keep_dims = reduce_mean_2_keep_dims_0, x = x_5_cast_fp16)[name = tensor<string, []>("reduce_mean_2_cast_fp16")];
121
+ tensor<int32, []> concat_2_axis_0 = const()[name = tensor<string, []>("concat_2_axis_0"), val = tensor<int32, []>(-1)];
122
+ tensor<bool, []> concat_2_interleave_0 = const()[name = tensor<string, []>("concat_2_interleave_0"), val = tensor<bool, []>(false)];
123
+ tensor<fp16, [1, 64, 1]> concat_2_cast_fp16 = concat(axis = concat_2_axis_0, interleave = concat_2_interleave_0, values = reduce_mean_2_cast_fp16)[name = tensor<string, []>("concat_2_cast_fp16")];
124
+ tensor<int32, [2]> var_150 = const()[name = tensor<string, []>("op_150"), val = tensor<int32, [2]>([1, 64])];
125
+ tensor<fp16, [1, 64]> input_27_cast_fp16 = reshape(shape = var_150, x = concat_2_cast_fp16)[name = tensor<string, []>("input_27_cast_fp16")];
126
+ tensor<fp16, [8, 64]> vad_encoder_encoder_2_se_fc1_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_2_se_fc1_weight_to_fp16"), val = tensor<fp16, [8, 64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(317504)))];
127
+ tensor<fp16, [8]> vad_encoder_encoder_2_se_fc1_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_2_se_fc1_bias_to_fp16"), val = tensor<fp16, [8]>([0x1.41cp-7, 0x1.4e8p-5, 0x1.7ccp-5, 0x1.554p-5, 0x1.d8cp-5, -0x1.34p-11, 0x1.2f8p-5, 0x1.0ap-5])];
128
+ tensor<fp16, [1, 8]> linear_4_cast_fp16 = linear(bias = vad_encoder_encoder_2_se_fc1_bias_to_fp16, weight = vad_encoder_encoder_2_se_fc1_weight_to_fp16, x = input_27_cast_fp16)[name = tensor<string, []>("linear_4_cast_fp16")];
129
+ tensor<fp16, [1, 8]> input_31_cast_fp16 = relu(x = linear_4_cast_fp16)[name = tensor<string, []>("input_31_cast_fp16")];
130
+ tensor<fp16, [64, 8]> vad_encoder_encoder_2_se_fc2_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_2_se_fc2_weight_to_fp16"), val = tensor<fp16, [64, 8]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(318592)))];
131
+ tensor<fp16, [64]> vad_encoder_encoder_2_se_fc2_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_2_se_fc2_bias_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(319680)))];
132
+ tensor<fp16, [1, 64]> linear_5_cast_fp16 = linear(bias = vad_encoder_encoder_2_se_fc2_bias_to_fp16, weight = vad_encoder_encoder_2_se_fc2_weight_to_fp16, x = input_31_cast_fp16)[name = tensor<string, []>("linear_5_cast_fp16")];
133
+ tensor<fp16, [1, 64]> y_9_cast_fp16 = sigmoid(x = linear_5_cast_fp16)[name = tensor<string, []>("y_9_cast_fp16")];
134
+ tensor<int32, [3]> var_160 = const()[name = tensor<string, []>("op_160"), val = tensor<int32, [3]>([1, 64, 1])];
135
+ tensor<fp16, [1, 64, 1]> y_11_cast_fp16 = reshape(shape = var_160, x = y_9_cast_fp16)[name = tensor<string, []>("y_11_cast_fp16")];
136
+ tensor<fp16, [1, 64, 3]> input_35_cast_fp16 = mul(x = x_5_cast_fp16, y = y_11_cast_fp16)[name = tensor<string, []>("input_35_cast_fp16")];
137
+ tensor<fp16, [1, 64, 3]> input_37_cast_fp16 = relu(x = input_35_cast_fp16)[name = tensor<string, []>("input_37_cast_fp16")];
138
+ tensor<string, []> x_7_pad_type_0 = const()[name = tensor<string, []>("x_7_pad_type_0"), val = tensor<string, []>("custom")];
139
+ tensor<int32, [2]> x_7_pad_0 = const()[name = tensor<string, []>("x_7_pad_0"), val = tensor<int32, [2]>([1, 1])];
140
+ tensor<int32, [1]> x_7_strides_0 = const()[name = tensor<string, []>("x_7_strides_0"), val = tensor<int32, [1]>([1])];
141
+ tensor<int32, [1]> x_7_dilations_0 = const()[name = tensor<string, []>("x_7_dilations_0"), val = tensor<int32, [1]>([1])];
142
+ tensor<int32, []> x_7_groups_0 = const()[name = tensor<string, []>("x_7_groups_0"), val = tensor<int32, []>(1)];
143
+ tensor<fp16, [128, 64, 3]> vad_encoder_encoder_3_conv_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_3_conv_weight_to_fp16"), val = tensor<fp16, [128, 64, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(319872)))];
144
+ tensor<fp16, [128]> vad_encoder_encoder_3_conv_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_3_conv_bias_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(369088)))];
145
+ tensor<fp16, [1, 128, 3]> x_7_cast_fp16 = conv(bias = vad_encoder_encoder_3_conv_bias_to_fp16, dilations = x_7_dilations_0, groups = x_7_groups_0, pad = x_7_pad_0, pad_type = x_7_pad_type_0, strides = x_7_strides_0, weight = vad_encoder_encoder_3_conv_weight_to_fp16, x = input_37_cast_fp16)[name = tensor<string, []>("x_7_cast_fp16")];
146
+ tensor<int32, [1]> reduce_mean_3_axes_0 = const()[name = tensor<string, []>("reduce_mean_3_axes_0"), val = tensor<int32, [1]>([-1])];
147
+ tensor<bool, []> reduce_mean_3_keep_dims_0 = const()[name = tensor<string, []>("reduce_mean_3_keep_dims_0"), val = tensor<bool, []>(true)];
148
+ tensor<fp16, [1, 128, 1]> reduce_mean_3_cast_fp16 = reduce_mean(axes = reduce_mean_3_axes_0, keep_dims = reduce_mean_3_keep_dims_0, x = x_7_cast_fp16)[name = tensor<string, []>("reduce_mean_3_cast_fp16")];
149
+ tensor<int32, []> concat_3_axis_0 = const()[name = tensor<string, []>("concat_3_axis_0"), val = tensor<int32, []>(-1)];
150
+ tensor<bool, []> concat_3_interleave_0 = const()[name = tensor<string, []>("concat_3_interleave_0"), val = tensor<bool, []>(false)];
151
+ tensor<fp16, [1, 128, 1]> concat_3_cast_fp16 = concat(axis = concat_3_axis_0, interleave = concat_3_interleave_0, values = reduce_mean_3_cast_fp16)[name = tensor<string, []>("concat_3_cast_fp16")];
152
+ tensor<int32, [2]> var_179 = const()[name = tensor<string, []>("op_179"), val = tensor<int32, [2]>([1, 128])];
153
+ tensor<fp16, [1, 128]> input_39_cast_fp16 = reshape(shape = var_179, x = concat_3_cast_fp16)[name = tensor<string, []>("input_39_cast_fp16")];
154
+ tensor<fp16, [16, 128]> vad_encoder_encoder_3_se_fc1_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_3_se_fc1_weight_to_fp16"), val = tensor<fp16, [16, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(369408)))];
155
+ tensor<fp16, [16]> vad_encoder_encoder_3_se_fc1_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_3_se_fc1_bias_to_fp16"), val = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(373568)))];
156
+ tensor<fp16, [1, 16]> linear_6_cast_fp16 = linear(bias = vad_encoder_encoder_3_se_fc1_bias_to_fp16, weight = vad_encoder_encoder_3_se_fc1_weight_to_fp16, x = input_39_cast_fp16)[name = tensor<string, []>("linear_6_cast_fp16")];
157
+ tensor<fp16, [1, 16]> input_43_cast_fp16 = relu(x = linear_6_cast_fp16)[name = tensor<string, []>("input_43_cast_fp16")];
158
+ tensor<fp16, [128, 16]> vad_encoder_encoder_3_se_fc2_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_3_se_fc2_weight_to_fp16"), val = tensor<fp16, [128, 16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(373696)))];
159
+ tensor<fp16, [128]> vad_encoder_encoder_3_se_fc2_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_3_se_fc2_bias_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(377856)))];
160
+ tensor<fp16, [1, 128]> linear_7_cast_fp16 = linear(bias = vad_encoder_encoder_3_se_fc2_bias_to_fp16, weight = vad_encoder_encoder_3_se_fc2_weight_to_fp16, x = input_43_cast_fp16)[name = tensor<string, []>("linear_7_cast_fp16")];
161
+ tensor<fp16, [1, 128]> y_13_cast_fp16 = sigmoid(x = linear_7_cast_fp16)[name = tensor<string, []>("y_13_cast_fp16")];
162
+ tensor<int32, [3]> var_189 = const()[name = tensor<string, []>("op_189"), val = tensor<int32, [3]>([1, 128, 1])];
163
+ tensor<fp16, [1, 128, 1]> y_cast_fp16 = reshape(shape = var_189, x = y_13_cast_fp16)[name = tensor<string, []>("y_cast_fp16")];
164
+ tensor<fp16, [1, 128, 3]> input_47_cast_fp16 = mul(x = x_7_cast_fp16, y = y_cast_fp16)[name = tensor<string, []>("input_47_cast_fp16")];
165
+ tensor<fp16, [1, 128, 3]> x_9_cast_fp16 = relu(x = input_47_cast_fp16)[name = tensor<string, []>("x_9_cast_fp16")];
166
+ tensor<int32, [1]> x_11_axes_0 = const()[name = tensor<string, []>("x_11_axes_0"), val = tensor<int32, [1]>([2])];
167
+ tensor<bool, []> x_11_keep_dims_0 = const()[name = tensor<string, []>("x_11_keep_dims_0"), val = tensor<bool, []>(true)];
168
+ tensor<fp16, [1, 128, 1]> x_11_cast_fp16 = reduce_mean(axes = x_11_axes_0, keep_dims = x_11_keep_dims_0, x = x_9_cast_fp16)[name = tensor<string, []>("x_11_cast_fp16")];
169
+ tensor<int32, [3]> transpose_6_perm_0 = const()[name = tensor<string, []>("transpose_6_perm_0"), val = tensor<int32, [3]>([2, 0, 1])];
170
+ tensor<string, []> transpose_6_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("transpose_6_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
171
+ tensor<fp32, [512]> concat_4 = const()[name = tensor<string, []>("concat_4"), val = tensor<fp32, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(378176)))];
172
+ tensor<fp32, [512, 128]> concat_5 = const()[name = tensor<string, []>("concat_5"), val = tensor<fp32, [512, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(380288)))];
173
+ tensor<fp32, [512, 128]> concat_6 = const()[name = tensor<string, []>("concat_6"), val = tensor<fp32, [512, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(642496)))];
174
+ tensor<fp32, [1, 128]> input_49_batch_first_lstm_h0_squeeze = const()[name = tensor<string, []>("input_49_batch_first_lstm_h0_squeeze"), val = tensor<fp32, [1, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(904704)))];
175
+ tensor<string, []> input_49_batch_first_direction_0 = const()[name = tensor<string, []>("input_49_batch_first_direction_0"), val = tensor<string, []>("forward")];
176
+ tensor<bool, []> input_49_batch_first_output_sequence_0 = const()[name = tensor<string, []>("input_49_batch_first_output_sequence_0"), val = tensor<bool, []>(true)];
177
+ tensor<string, []> input_49_batch_first_recurrent_activation_0 = const()[name = tensor<string, []>("input_49_batch_first_recurrent_activation_0"), val = tensor<string, []>("sigmoid")];
178
+ tensor<string, []> input_49_batch_first_cell_activation_0 = const()[name = tensor<string, []>("input_49_batch_first_cell_activation_0"), val = tensor<string, []>("tanh")];
179
+ tensor<string, []> input_49_batch_first_activation_0 = const()[name = tensor<string, []>("input_49_batch_first_activation_0"), val = tensor<string, []>("tanh")];
180
+ tensor<fp16, [1, 1, 128]> transpose_6_cast_fp16 = transpose(perm = transpose_6_perm_0, x = x_11_cast_fp16)[name = tensor<string, []>("transpose_9")];
181
+ tensor<fp32, [1, 1, 128]> transpose_6_cast_fp16_to_fp32 = cast(dtype = transpose_6_cast_fp16_to_fp32_dtype_0, x = transpose_6_cast_fp16)[name = tensor<string, []>("cast_10")];
182
+ tensor<fp32, [1, 1, 128]> input_49_batch_first_0, tensor<fp32, [1, 128]> input_49_batch_first_1, tensor<fp32, [1, 128]> input_49_batch_first_2 = lstm(activation = input_49_batch_first_activation_0, bias = concat_4, cell_activation = input_49_batch_first_cell_activation_0, direction = input_49_batch_first_direction_0, initial_c = input_49_batch_first_lstm_h0_squeeze, initial_h = input_49_batch_first_lstm_h0_squeeze, output_sequence = input_49_batch_first_output_sequence_0, recurrent_activation = input_49_batch_first_recurrent_activation_0, weight_hh = concat_6, weight_ih = concat_5, x = transpose_6_cast_fp16_to_fp32)[name = tensor<string, []>("input_49_batch_first")];
183
+ tensor<int32, [3]> input_49_perm_0 = const()[name = tensor<string, []>("input_49_perm_0"), val = tensor<int32, [3]>([1, 0, 2])];
184
+ tensor<string, []> input_49_batch_first_0_to_fp16_dtype_0 = const()[name = tensor<string, []>("input_49_batch_first_0_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
185
+ tensor<int32, [1]> var_216_axes_0 = const()[name = tensor<string, []>("op_216_axes_0"), val = tensor<int32, [1]>([-1])];
186
+ tensor<fp16, [128]> vad_decoder_layer_norm_weight_to_fp16 = const()[name = tensor<string, []>("vad_decoder_layer_norm_weight_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(905280)))];
187
+ tensor<fp16, [128]> vad_decoder_layer_norm_bias_to_fp16 = const()[name = tensor<string, []>("vad_decoder_layer_norm_bias_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(905600)))];
188
+ tensor<fp16, []> var_5_to_fp16 = const()[name = tensor<string, []>("op_5_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
189
+ tensor<fp16, [1, 1, 128]> input_49_batch_first_0_to_fp16 = cast(dtype = input_49_batch_first_0_to_fp16_dtype_0, x = input_49_batch_first_0)[name = tensor<string, []>("cast_9")];
190
+ tensor<fp16, [1, 1, 128]> input_49_cast_fp16 = transpose(perm = input_49_perm_0, x = input_49_batch_first_0_to_fp16)[name = tensor<string, []>("transpose_8")];
191
+ tensor<fp16, [1, 1, 128]> var_216_cast_fp16 = layer_norm(axes = var_216_axes_0, beta = vad_decoder_layer_norm_bias_to_fp16, epsilon = var_5_to_fp16, gamma = vad_decoder_layer_norm_weight_to_fp16, x = input_49_cast_fp16)[name = tensor<string, []>("op_216_cast_fp16")];
192
+ tensor<fp16, []> var_217_to_fp16 = const()[name = tensor<string, []>("op_217_to_fp16"), val = tensor<fp16, []>(0x1.334p-3)];
193
+ tensor<fp16, [1, 1, 128]> x_cast_fp16 = mul(x = var_216_cast_fp16, y = var_217_to_fp16)[name = tensor<string, []>("x_cast_fp16")];
194
+ tensor<int32, [3]> input_51_perm_0 = const()[name = tensor<string, []>("input_51_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
195
+ tensor<fp16, [1, 128, 1]> input_51_cast_fp16 = transpose(perm = input_51_perm_0, x = x_cast_fp16)[name = tensor<string, []>("transpose_7")];
196
+ tensor<fp16, [1, 128, 1]> input_55_cast_fp16 = relu(x = input_51_cast_fp16)[name = tensor<string, []>("input_55_cast_fp16")];
197
+ tensor<string, []> input_pad_type_0 = const()[name = tensor<string, []>("input_pad_type_0"), val = tensor<string, []>("valid")];
198
+ tensor<int32, [1]> input_strides_0 = const()[name = tensor<string, []>("input_strides_0"), val = tensor<int32, [1]>([1])];
199
+ tensor<int32, [2]> input_pad_0 = const()[name = tensor<string, []>("input_pad_0"), val = tensor<int32, [2]>([0, 0])];
200
+ tensor<int32, [1]> input_dilations_0 = const()[name = tensor<string, []>("input_dilations_0"), val = tensor<int32, [1]>([1])];
201
+ tensor<int32, []> input_groups_0 = const()[name = tensor<string, []>("input_groups_0"), val = tensor<int32, []>(1)];
202
+ tensor<fp16, [1, 128, 1]> vad_decoder_conv_weight_to_fp16 = const()[name = tensor<string, []>("vad_decoder_conv_weight_to_fp16"), val = tensor<fp16, [1, 128, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(905920)))];
203
+ tensor<fp16, [1]> vad_decoder_conv_bias_to_fp16 = const()[name = tensor<string, []>("vad_decoder_conv_bias_to_fp16"), val = tensor<fp16, [1]>([0x1.dfp-5])];
204
+ tensor<fp16, [1, 1, 1]> input_cast_fp16 = conv(bias = vad_decoder_conv_bias_to_fp16, dilations = input_dilations_0, groups = input_groups_0, pad = input_pad_0, pad_type = input_pad_type_0, strides = input_strides_0, weight = vad_decoder_conv_weight_to_fp16, x = input_55_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
205
+ tensor<fp16, [1, 1, 1]> var_229_cast_fp16 = sigmoid(x = input_cast_fp16)[name = tensor<string, []>("op_229_cast_fp16")];
206
+ tensor<int32, [1]> var_230_axes_0 = const()[name = tensor<string, []>("op_230_axes_0"), val = tensor<int32, [1]>([-1])];
207
+ tensor<fp16, [1, 1]> var_230_cast_fp16 = squeeze(axes = var_230_axes_0, x = var_229_cast_fp16)[name = tensor<string, []>("op_230_cast_fp16")];
208
+ tensor<string, []> var_230_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_230_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
209
+ tensor<fp32, [1, 1]> vad_probability = cast(dtype = var_230_cast_fp16_to_fp32_dtype_0, x = var_230_cast_fp16)[name = tensor<string, []>("cast_8")];
210
+ } -> (vad_probability);
211
+ }
silero_vad.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:45846d0738d3bf5e4b6e9e7d2fddda7b1ad07da33d473f0405e51d3b6c4c11a9
3
+ size 906240
silero_vad_se_trained.mlpackage/Data/com.apple.CoreML/model.mlmodel ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:74b401d76333292a0cafa1024f98f4e4e838427e67f215b65c95ddeed6a2d16e
3
+ size 32557
silero_vad_se_trained.mlpackage/Data/com.apple.CoreML/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:45846d0738d3bf5e4b6e9e7d2fddda7b1ad07da33d473f0405e51d3b6c4c11a9
3
+ size 906240
silero_vad_se_trained.mlpackage/Manifest.json ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "fileFormatVersion": "1.0.0",
3
+ "itemInfoEntries": {
4
+ "5162830C-61A9-43CB-85D2-5E27DE992B03": {
5
+ "author": "com.apple.CoreML",
6
+ "description": "CoreML Model Weights",
7
+ "name": "weights",
8
+ "path": "com.apple.CoreML/weights"
9
+ },
10
+ "C9FCCBCB-EEAA-416F-B703-903A78B678E1": {
11
+ "author": "com.apple.CoreML",
12
+ "description": "CoreML Model Specification",
13
+ "name": "model.mlmodel",
14
+ "path": "com.apple.CoreML/model.mlmodel"
15
+ }
16
+ },
17
+ "rootModelIdentifier": "C9FCCBCB-EEAA-416F-B703-903A78B678E1"
18
+ }
silero_vad_se_trained_4bit.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ed0b15a6463ed93cf4e8ee8ad21d5df4e6c54e1db074b646bcd88b9acfa517a4
3
+ size 243
silero_vad_se_trained_4bit.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d430fe5a090b2ae6424d79b458c3ecc7899d65b84df6ad0455df0bb0b378822
3
+ size 422
silero_vad_se_trained_4bit.mlmodelc/metadata.json ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "shortDescription" : "VAD with SE modules trained on MUSAN (86.47% accuracy)",
4
+ "metadataOutputVersion" : "3.0",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float32",
10
+ "formattedType" : "MultiArray (Float32 1 × 1)",
11
+ "shortDescription" : "",
12
+ "shape" : "[1, 1]",
13
+ "name" : "vad_probability",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "version" : "2.0",
18
+ "modelParameters" : [
19
+
20
+ ],
21
+ "author" : "Silero VAD with Trained SE Modules",
22
+ "specificationVersion" : 7,
23
+ "storagePrecision" : "Mixed (Float16, Float32, Palettized (4 bits))",
24
+ "mlProgramOperationTypeHistogram" : {
25
+ "Concat" : 4,
26
+ "Ios16.cast" : 4,
27
+ "Ios16.mul" : 5,
28
+ "Ios16.layerNorm" : 1,
29
+ "SliceByIndex" : 3,
30
+ "Ios16.constexprLutToDense" : 8,
31
+ "Ios16.sqrt" : 3,
32
+ "Stack" : 1,
33
+ "Transpose" : 3,
34
+ "Ios16.sigmoid" : 5,
35
+ "Ios16.conv" : 5,
36
+ "Ios16.add" : 6,
37
+ "Ios16.linear" : 14,
38
+ "Ios16.reduceMean" : 5,
39
+ "Ios16.relu" : 9,
40
+ "Ios16.lstm" : 1,
41
+ "Ios16.pow" : 6,
42
+ "Squeeze" : 1,
43
+ "Ios16.reshape" : 8
44
+ },
45
+ "computePrecision" : "Mixed (Float16, Float32, Int32)",
46
+ "stateSchema" : [
47
+
48
+ ],
49
+ "isUpdatable" : "0",
50
+ "availability" : {
51
+ "macOS" : "13.0",
52
+ "tvOS" : "16.0",
53
+ "visionOS" : "1.0",
54
+ "watchOS" : "9.0",
55
+ "iOS" : "16.0",
56
+ "macCatalyst" : "16.0"
57
+ },
58
+ "modelType" : {
59
+ "name" : "MLModelType_mlProgram"
60
+ },
61
+ "inputSchema" : [
62
+ {
63
+ "hasShapeFlexibility" : "0",
64
+ "isOptional" : "0",
65
+ "dataType" : "Float32",
66
+ "formattedType" : "MultiArray (Float32 1 × 512)",
67
+ "shortDescription" : "",
68
+ "shape" : "[1, 512]",
69
+ "name" : "audio_chunk",
70
+ "type" : "MultiArray"
71
+ }
72
+ ],
73
+ "userDefinedMetadata" : {
74
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
75
+ "com.github.apple.coremltools.version" : "8.3.0",
76
+ "com.github.apple.coremltools.source" : "torch==2.5.0"
77
+ },
78
+ "generatedClassName" : "silero_vad_se_trained_palettize_4bit",
79
+ "method" : "predict"
80
+ }
81
+ ]
silero_vad_se_trained_4bit.mlmodelc/model.mil ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ program(1.0)
2
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3500.14.1"}, {"coremlc-version", "3500.32.1"}})]
3
+ {
4
+ func main<ios16>(tensor<fp32, [1, 512]> audio_chunk) {
5
+ tensor<int32, [2]> frame_1_begin_0 = const()[name = tensor<string, []>("frame_1_begin_0"), val = tensor<int32, [2]>([0, 0])];
6
+ tensor<int32, [2]> frame_1_end_0 = const()[name = tensor<string, []>("frame_1_end_0"), val = tensor<int32, [2]>([1, 256])];
7
+ tensor<bool, [2]> frame_1_end_mask_0 = const()[name = tensor<string, []>("frame_1_end_mask_0"), val = tensor<bool, [2]>([true, false])];
8
+ tensor<string, []> audio_chunk_to_fp16_dtype_0 = const()[name = tensor<string, []>("audio_chunk_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
9
+ tensor<fp16, [1, 512]> audio_chunk_to_fp16 = cast(dtype = audio_chunk_to_fp16_dtype_0, x = audio_chunk)[name = tensor<string, []>("cast_3")];
10
+ tensor<fp16, [1, 256]> frame_1_cast_fp16 = slice_by_index(begin = frame_1_begin_0, end = frame_1_end_0, end_mask = frame_1_end_mask_0, x = audio_chunk_to_fp16)[name = tensor<string, []>("frame_1_cast_fp16")];
11
+ tensor<int32, [2]> frame_3_begin_0 = const()[name = tensor<string, []>("frame_3_begin_0"), val = tensor<int32, [2]>([0, 128])];
12
+ tensor<int32, [2]> frame_3_end_0 = const()[name = tensor<string, []>("frame_3_end_0"), val = tensor<int32, [2]>([1, 384])];
13
+ tensor<bool, [2]> frame_3_end_mask_0 = const()[name = tensor<string, []>("frame_3_end_mask_0"), val = tensor<bool, [2]>([true, false])];
14
+ tensor<fp16, [1, 256]> frame_3_cast_fp16 = slice_by_index(begin = frame_3_begin_0, end = frame_3_end_0, end_mask = frame_3_end_mask_0, x = audio_chunk_to_fp16)[name = tensor<string, []>("frame_3_cast_fp16")];
15
+ tensor<int32, [2]> frame_begin_0 = const()[name = tensor<string, []>("frame_begin_0"), val = tensor<int32, [2]>([0, 256])];
16
+ tensor<int32, [2]> frame_end_0 = const()[name = tensor<string, []>("frame_end_0"), val = tensor<int32, [2]>([1, 1])];
17
+ tensor<bool, [2]> frame_end_mask_0 = const()[name = tensor<string, []>("frame_end_mask_0"), val = tensor<bool, [2]>([true, true])];
18
+ tensor<fp16, [1, 256]> frame_cast_fp16 = slice_by_index(begin = frame_begin_0, end = frame_end_0, end_mask = frame_end_mask_0, x = audio_chunk_to_fp16)[name = tensor<string, []>("frame_cast_fp16")];
19
+ tensor<fp16, [129, 256]> op_26_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [16512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16640))), name = tensor<string, []>("op_26_to_fp16_palettized"), shape = tensor<uint32, [2]>([129, 256])];
20
+ tensor<fp16, [129]> var_38_bias_0_to_fp16 = const()[name = tensor<string, []>("op_38_bias_0_to_fp16"), val = tensor<fp16, [129]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16768)))];
21
+ tensor<fp16, [1, 129]> var_38_cast_fp16 = linear(bias = var_38_bias_0_to_fp16, weight = op_26_to_fp16_palettized, x = frame_1_cast_fp16)[name = tensor<string, []>("op_38_cast_fp16")];
22
+ tensor<fp16, [129, 256]> op_29_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [16512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(17152))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(33728))), name = tensor<string, []>("op_29_to_fp16_palettized"), shape = tensor<uint32, [2]>([129, 256])];
23
+ tensor<fp16, [1, 129]> var_40_cast_fp16 = linear(bias = var_38_bias_0_to_fp16, weight = op_29_to_fp16_palettized, x = frame_1_cast_fp16)[name = tensor<string, []>("op_40_cast_fp16")];
24
+ tensor<fp16, []> var_20_promoted_to_fp16 = const()[name = tensor<string, []>("op_20_promoted_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
25
+ tensor<fp16, [1, 129]> var_41_cast_fp16 = pow(x = var_38_cast_fp16, y = var_20_promoted_to_fp16)[name = tensor<string, []>("op_41_cast_fp16")];
26
+ tensor<fp16, []> var_20_promoted_1_to_fp16 = const()[name = tensor<string, []>("op_20_promoted_1_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
27
+ tensor<fp16, [1, 129]> var_42_cast_fp16 = pow(x = var_40_cast_fp16, y = var_20_promoted_1_to_fp16)[name = tensor<string, []>("op_42_cast_fp16")];
28
+ tensor<fp16, [1, 129]> var_43_cast_fp16 = add(x = var_41_cast_fp16, y = var_42_cast_fp16)[name = tensor<string, []>("op_43_cast_fp16")];
29
+ tensor<fp16, []> var_44_to_fp16 = const()[name = tensor<string, []>("op_44_to_fp16"), val = tensor<fp16, []>(0x1.1p-20)];
30
+ tensor<fp16, [1, 129]> var_45_cast_fp16 = add(x = var_43_cast_fp16, y = var_44_to_fp16)[name = tensor<string, []>("op_45_cast_fp16")];
31
+ tensor<fp16, [1, 129]> var_46_cast_fp16 = sqrt(x = var_45_cast_fp16)[name = tensor<string, []>("op_46_cast_fp16")];
32
+ tensor<fp16, [1, 129]> var_48_cast_fp16 = linear(bias = var_38_bias_0_to_fp16, weight = op_26_to_fp16_palettized, x = frame_3_cast_fp16)[name = tensor<string, []>("op_48_cast_fp16")];
33
+ tensor<fp16, [1, 129]> var_50_cast_fp16 = linear(bias = var_38_bias_0_to_fp16, weight = op_29_to_fp16_palettized, x = frame_3_cast_fp16)[name = tensor<string, []>("op_50_cast_fp16")];
34
+ tensor<fp16, []> var_20_promoted_2_to_fp16 = const()[name = tensor<string, []>("op_20_promoted_2_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
35
+ tensor<fp16, [1, 129]> var_51_cast_fp16 = pow(x = var_48_cast_fp16, y = var_20_promoted_2_to_fp16)[name = tensor<string, []>("op_51_cast_fp16")];
36
+ tensor<fp16, []> var_20_promoted_3_to_fp16 = const()[name = tensor<string, []>("op_20_promoted_3_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
37
+ tensor<fp16, [1, 129]> var_52_cast_fp16 = pow(x = var_50_cast_fp16, y = var_20_promoted_3_to_fp16)[name = tensor<string, []>("op_52_cast_fp16")];
38
+ tensor<fp16, [1, 129]> var_53_cast_fp16 = add(x = var_51_cast_fp16, y = var_52_cast_fp16)[name = tensor<string, []>("op_53_cast_fp16")];
39
+ tensor<fp16, []> var_54_to_fp16 = const()[name = tensor<string, []>("op_54_to_fp16"), val = tensor<fp16, []>(0x1.1p-20)];
40
+ tensor<fp16, [1, 129]> var_55_cast_fp16 = add(x = var_53_cast_fp16, y = var_54_to_fp16)[name = tensor<string, []>("op_55_cast_fp16")];
41
+ tensor<fp16, [1, 129]> var_56_cast_fp16 = sqrt(x = var_55_cast_fp16)[name = tensor<string, []>("op_56_cast_fp16")];
42
+ tensor<fp16, [1, 129]> var_58_cast_fp16 = linear(bias = var_38_bias_0_to_fp16, weight = op_26_to_fp16_palettized, x = frame_cast_fp16)[name = tensor<string, []>("op_58_cast_fp16")];
43
+ tensor<fp16, [1, 129]> var_60_cast_fp16 = linear(bias = var_38_bias_0_to_fp16, weight = op_29_to_fp16_palettized, x = frame_cast_fp16)[name = tensor<string, []>("op_60_cast_fp16")];
44
+ tensor<fp16, []> var_20_promoted_4_to_fp16 = const()[name = tensor<string, []>("op_20_promoted_4_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
45
+ tensor<fp16, [1, 129]> var_61_cast_fp16 = pow(x = var_58_cast_fp16, y = var_20_promoted_4_to_fp16)[name = tensor<string, []>("op_61_cast_fp16")];
46
+ tensor<fp16, []> var_20_promoted_5_to_fp16 = const()[name = tensor<string, []>("op_20_promoted_5_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
47
+ tensor<fp16, [1, 129]> var_62_cast_fp16 = pow(x = var_60_cast_fp16, y = var_20_promoted_5_to_fp16)[name = tensor<string, []>("op_62_cast_fp16")];
48
+ tensor<fp16, [1, 129]> var_63_cast_fp16 = add(x = var_61_cast_fp16, y = var_62_cast_fp16)[name = tensor<string, []>("op_63_cast_fp16")];
49
+ tensor<fp16, []> var_64_to_fp16 = const()[name = tensor<string, []>("op_64_to_fp16"), val = tensor<fp16, []>(0x1.1p-20)];
50
+ tensor<fp16, [1, 129]> var_65_cast_fp16 = add(x = var_63_cast_fp16, y = var_64_to_fp16)[name = tensor<string, []>("op_65_cast_fp16")];
51
+ tensor<fp16, [1, 129]> magnitude_cast_fp16 = sqrt(x = var_65_cast_fp16)[name = tensor<string, []>("magnitude_cast_fp16")];
52
+ tensor<int32, []> input_1_axis_0 = const()[name = tensor<string, []>("input_1_axis_0"), val = tensor<int32, []>(2)];
53
+ tensor<fp16, [1, 129, 3]> input_1_cast_fp16 = stack(axis = input_1_axis_0, values = (var_46_cast_fp16, var_56_cast_fp16, magnitude_cast_fp16))[name = tensor<string, []>("input_1_cast_fp16")];
54
+ tensor<string, []> x_1_pad_type_0 = const()[name = tensor<string, []>("x_1_pad_type_0"), val = tensor<string, []>("custom")];
55
+ tensor<int32, [2]> x_1_pad_0 = const()[name = tensor<string, []>("x_1_pad_0"), val = tensor<int32, [2]>([1, 1])];
56
+ tensor<int32, [1]> x_1_strides_0 = const()[name = tensor<string, []>("x_1_strides_0"), val = tensor<int32, [1]>([1])];
57
+ tensor<int32, [1]> x_1_dilations_0 = const()[name = tensor<string, []>("x_1_dilations_0"), val = tensor<int32, [1]>([1])];
58
+ tensor<int32, []> x_1_groups_0 = const()[name = tensor<string, []>("x_1_groups_0"), val = tensor<int32, []>(1)];
59
+ tensor<fp16, [128, 129, 3]> vad_encoder_encoder_0_conv_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [24768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(33856))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(58688))), name = tensor<string, []>("vad_encoder_encoder_0_conv_weight_to_fp16_palettized"), shape = tensor<uint32, [3]>([128, 129, 3])];
60
+ tensor<fp16, [128]> vad_encoder_encoder_0_conv_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_0_conv_bias_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(58816)))];
61
+ tensor<fp16, [1, 128, 3]> x_1_cast_fp16 = conv(bias = vad_encoder_encoder_0_conv_bias_to_fp16, dilations = x_1_dilations_0, groups = x_1_groups_0, pad = x_1_pad_0, pad_type = x_1_pad_type_0, strides = x_1_strides_0, weight = vad_encoder_encoder_0_conv_weight_to_fp16_palettized, x = input_1_cast_fp16)[name = tensor<string, []>("x_1_cast_fp16")];
62
+ tensor<int32, [1]> reduce_mean_0_axes_0 = const()[name = tensor<string, []>("reduce_mean_0_axes_0"), val = tensor<int32, [1]>([-1])];
63
+ tensor<bool, []> reduce_mean_0_keep_dims_0 = const()[name = tensor<string, []>("reduce_mean_0_keep_dims_0"), val = tensor<bool, []>(true)];
64
+ tensor<fp16, [1, 128, 1]> reduce_mean_0_cast_fp16 = reduce_mean(axes = reduce_mean_0_axes_0, keep_dims = reduce_mean_0_keep_dims_0, x = x_1_cast_fp16)[name = tensor<string, []>("reduce_mean_0_cast_fp16")];
65
+ tensor<int32, []> concat_0_axis_0 = const()[name = tensor<string, []>("concat_0_axis_0"), val = tensor<int32, []>(-1)];
66
+ tensor<bool, []> concat_0_interleave_0 = const()[name = tensor<string, []>("concat_0_interleave_0"), val = tensor<bool, []>(false)];
67
+ tensor<fp16, [1, 128, 1]> concat_0_cast_fp16 = concat(axis = concat_0_axis_0, interleave = concat_0_interleave_0, values = reduce_mean_0_cast_fp16)[name = tensor<string, []>("concat_0_cast_fp16")];
68
+ tensor<int32, [2]> var_92 = const()[name = tensor<string, []>("op_92"), val = tensor<int32, [2]>([1, 128])];
69
+ tensor<fp16, [1, 128]> input_3_cast_fp16 = reshape(shape = var_92, x = concat_0_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
70
+ tensor<fp16, [16, 128]> vad_encoder_encoder_0_se_fc1_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_0_se_fc1_weight_to_fp16"), val = tensor<fp16, [16, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(59136)))];
71
+ tensor<fp16, [16]> vad_encoder_encoder_0_se_fc1_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_0_se_fc1_bias_to_fp16"), val = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(63296)))];
72
+ tensor<fp16, [1, 16]> linear_0_cast_fp16 = linear(bias = vad_encoder_encoder_0_se_fc1_bias_to_fp16, weight = vad_encoder_encoder_0_se_fc1_weight_to_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("linear_0_cast_fp16")];
73
+ tensor<fp16, [1, 16]> input_7_cast_fp16 = relu(x = linear_0_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
74
+ tensor<fp16, [128, 16]> vad_encoder_encoder_0_se_fc2_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_0_se_fc2_weight_to_fp16"), val = tensor<fp16, [128, 16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(63424)))];
75
+ tensor<fp16, [128]> vad_encoder_encoder_0_se_fc2_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_0_se_fc2_bias_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(67584)))];
76
+ tensor<fp16, [1, 128]> linear_1_cast_fp16 = linear(bias = vad_encoder_encoder_0_se_fc2_bias_to_fp16, weight = vad_encoder_encoder_0_se_fc2_weight_to_fp16, x = input_7_cast_fp16)[name = tensor<string, []>("linear_1_cast_fp16")];
77
+ tensor<fp16, [1, 128]> y_1_cast_fp16 = sigmoid(x = linear_1_cast_fp16)[name = tensor<string, []>("y_1_cast_fp16")];
78
+ tensor<int32, [3]> var_102 = const()[name = tensor<string, []>("op_102"), val = tensor<int32, [3]>([1, 128, 1])];
79
+ tensor<fp16, [1, 128, 1]> y_3_cast_fp16 = reshape(shape = var_102, x = y_1_cast_fp16)[name = tensor<string, []>("y_3_cast_fp16")];
80
+ tensor<fp16, [1, 128, 3]> input_11_cast_fp16 = mul(x = x_1_cast_fp16, y = y_3_cast_fp16)[name = tensor<string, []>("input_11_cast_fp16")];
81
+ tensor<fp16, [1, 128, 3]> input_13_cast_fp16 = relu(x = input_11_cast_fp16)[name = tensor<string, []>("input_13_cast_fp16")];
82
+ tensor<string, []> x_3_pad_type_0 = const()[name = tensor<string, []>("x_3_pad_type_0"), val = tensor<string, []>("custom")];
83
+ tensor<int32, [2]> x_3_pad_0 = const()[name = tensor<string, []>("x_3_pad_0"), val = tensor<int32, [2]>([1, 1])];
84
+ tensor<int32, [1]> x_3_strides_0 = const()[name = tensor<string, []>("x_3_strides_0"), val = tensor<int32, [1]>([1])];
85
+ tensor<int32, [1]> x_3_dilations_0 = const()[name = tensor<string, []>("x_3_dilations_0"), val = tensor<int32, [1]>([1])];
86
+ tensor<int32, []> x_3_groups_0 = const()[name = tensor<string, []>("x_3_groups_0"), val = tensor<int32, []>(1)];
87
+ tensor<fp16, [64, 128, 3]> vad_encoder_encoder_1_conv_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [12288]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(67904))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(80256))), name = tensor<string, []>("vad_encoder_encoder_1_conv_weight_to_fp16_palettized"), shape = tensor<uint32, [3]>([64, 128, 3])];
88
+ tensor<fp16, [64]> vad_encoder_encoder_1_conv_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_1_conv_bias_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(80384)))];
89
+ tensor<fp16, [1, 64, 3]> x_3_cast_fp16 = conv(bias = vad_encoder_encoder_1_conv_bias_to_fp16, dilations = x_3_dilations_0, groups = x_3_groups_0, pad = x_3_pad_0, pad_type = x_3_pad_type_0, strides = x_3_strides_0, weight = vad_encoder_encoder_1_conv_weight_to_fp16_palettized, x = input_13_cast_fp16)[name = tensor<string, []>("x_3_cast_fp16")];
90
+ tensor<int32, [1]> reduce_mean_1_axes_0 = const()[name = tensor<string, []>("reduce_mean_1_axes_0"), val = tensor<int32, [1]>([-1])];
91
+ tensor<bool, []> reduce_mean_1_keep_dims_0 = const()[name = tensor<string, []>("reduce_mean_1_keep_dims_0"), val = tensor<bool, []>(true)];
92
+ tensor<fp16, [1, 64, 1]> reduce_mean_1_cast_fp16 = reduce_mean(axes = reduce_mean_1_axes_0, keep_dims = reduce_mean_1_keep_dims_0, x = x_3_cast_fp16)[name = tensor<string, []>("reduce_mean_1_cast_fp16")];
93
+ tensor<int32, []> concat_1_axis_0 = const()[name = tensor<string, []>("concat_1_axis_0"), val = tensor<int32, []>(-1)];
94
+ tensor<bool, []> concat_1_interleave_0 = const()[name = tensor<string, []>("concat_1_interleave_0"), val = tensor<bool, []>(false)];
95
+ tensor<fp16, [1, 64, 1]> concat_1_cast_fp16 = concat(axis = concat_1_axis_0, interleave = concat_1_interleave_0, values = reduce_mean_1_cast_fp16)[name = tensor<string, []>("concat_1_cast_fp16")];
96
+ tensor<int32, [2]> var_121 = const()[name = tensor<string, []>("op_121"), val = tensor<int32, [2]>([1, 64])];
97
+ tensor<fp16, [1, 64]> input_15_cast_fp16 = reshape(shape = var_121, x = concat_1_cast_fp16)[name = tensor<string, []>("input_15_cast_fp16")];
98
+ tensor<fp16, [8, 64]> vad_encoder_encoder_1_se_fc1_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_1_se_fc1_weight_to_fp16"), val = tensor<fp16, [8, 64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(80576)))];
99
+ tensor<fp16, [8]> vad_encoder_encoder_1_se_fc1_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_1_se_fc1_bias_to_fp16"), val = tensor<fp16, [8]>([0x1.00cp-9, 0x1.6dcp-5, 0x1.82cp-5, 0x1.054p-5, 0x1.8a4p-4, 0x1.f88p-7, 0x1.234p-5, 0x1.514p-5])];
100
+ tensor<fp16, [1, 8]> linear_2_cast_fp16 = linear(bias = vad_encoder_encoder_1_se_fc1_bias_to_fp16, weight = vad_encoder_encoder_1_se_fc1_weight_to_fp16, x = input_15_cast_fp16)[name = tensor<string, []>("linear_2_cast_fp16")];
101
+ tensor<fp16, [1, 8]> input_19_cast_fp16 = relu(x = linear_2_cast_fp16)[name = tensor<string, []>("input_19_cast_fp16")];
102
+ tensor<fp16, [64, 8]> vad_encoder_encoder_1_se_fc2_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_1_se_fc2_weight_to_fp16"), val = tensor<fp16, [64, 8]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(81664)))];
103
+ tensor<fp16, [64]> vad_encoder_encoder_1_se_fc2_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_1_se_fc2_bias_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(82752)))];
104
+ tensor<fp16, [1, 64]> linear_3_cast_fp16 = linear(bias = vad_encoder_encoder_1_se_fc2_bias_to_fp16, weight = vad_encoder_encoder_1_se_fc2_weight_to_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("linear_3_cast_fp16")];
105
+ tensor<fp16, [1, 64]> y_5_cast_fp16 = sigmoid(x = linear_3_cast_fp16)[name = tensor<string, []>("y_5_cast_fp16")];
106
+ tensor<int32, [3]> var_131 = const()[name = tensor<string, []>("op_131"), val = tensor<int32, [3]>([1, 64, 1])];
107
+ tensor<fp16, [1, 64, 1]> y_7_cast_fp16 = reshape(shape = var_131, x = y_5_cast_fp16)[name = tensor<string, []>("y_7_cast_fp16")];
108
+ tensor<fp16, [1, 64, 3]> input_23_cast_fp16 = mul(x = x_3_cast_fp16, y = y_7_cast_fp16)[name = tensor<string, []>("input_23_cast_fp16")];
109
+ tensor<fp16, [1, 64, 3]> input_25_cast_fp16 = relu(x = input_23_cast_fp16)[name = tensor<string, []>("input_25_cast_fp16")];
110
+ tensor<string, []> x_5_pad_type_0 = const()[name = tensor<string, []>("x_5_pad_type_0"), val = tensor<string, []>("custom")];
111
+ tensor<int32, [2]> x_5_pad_0 = const()[name = tensor<string, []>("x_5_pad_0"), val = tensor<int32, [2]>([1, 1])];
112
+ tensor<int32, [1]> x_5_strides_0 = const()[name = tensor<string, []>("x_5_strides_0"), val = tensor<int32, [1]>([1])];
113
+ tensor<int32, [1]> x_5_dilations_0 = const()[name = tensor<string, []>("x_5_dilations_0"), val = tensor<int32, [1]>([1])];
114
+ tensor<int32, []> x_5_groups_0 = const()[name = tensor<string, []>("x_5_groups_0"), val = tensor<int32, []>(1)];
115
+ tensor<fp16, [64, 64, 3]> vad_encoder_encoder_2_conv_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [6144]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(82944))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(89152))), name = tensor<string, []>("vad_encoder_encoder_2_conv_weight_to_fp16_palettized"), shape = tensor<uint32, [3]>([64, 64, 3])];
116
+ tensor<fp16, [64]> vad_encoder_encoder_2_conv_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_2_conv_bias_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(89280)))];
117
+ tensor<fp16, [1, 64, 3]> x_5_cast_fp16 = conv(bias = vad_encoder_encoder_2_conv_bias_to_fp16, dilations = x_5_dilations_0, groups = x_5_groups_0, pad = x_5_pad_0, pad_type = x_5_pad_type_0, strides = x_5_strides_0, weight = vad_encoder_encoder_2_conv_weight_to_fp16_palettized, x = input_25_cast_fp16)[name = tensor<string, []>("x_5_cast_fp16")];
118
+ tensor<int32, [1]> reduce_mean_2_axes_0 = const()[name = tensor<string, []>("reduce_mean_2_axes_0"), val = tensor<int32, [1]>([-1])];
119
+ tensor<bool, []> reduce_mean_2_keep_dims_0 = const()[name = tensor<string, []>("reduce_mean_2_keep_dims_0"), val = tensor<bool, []>(true)];
120
+ tensor<fp16, [1, 64, 1]> reduce_mean_2_cast_fp16 = reduce_mean(axes = reduce_mean_2_axes_0, keep_dims = reduce_mean_2_keep_dims_0, x = x_5_cast_fp16)[name = tensor<string, []>("reduce_mean_2_cast_fp16")];
121
+ tensor<int32, []> concat_2_axis_0 = const()[name = tensor<string, []>("concat_2_axis_0"), val = tensor<int32, []>(-1)];
122
+ tensor<bool, []> concat_2_interleave_0 = const()[name = tensor<string, []>("concat_2_interleave_0"), val = tensor<bool, []>(false)];
123
+ tensor<fp16, [1, 64, 1]> concat_2_cast_fp16 = concat(axis = concat_2_axis_0, interleave = concat_2_interleave_0, values = reduce_mean_2_cast_fp16)[name = tensor<string, []>("concat_2_cast_fp16")];
124
+ tensor<int32, [2]> var_150 = const()[name = tensor<string, []>("op_150"), val = tensor<int32, [2]>([1, 64])];
125
+ tensor<fp16, [1, 64]> input_27_cast_fp16 = reshape(shape = var_150, x = concat_2_cast_fp16)[name = tensor<string, []>("input_27_cast_fp16")];
126
+ tensor<fp16, [8, 64]> vad_encoder_encoder_2_se_fc1_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_2_se_fc1_weight_to_fp16"), val = tensor<fp16, [8, 64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(89472)))];
127
+ tensor<fp16, [8]> vad_encoder_encoder_2_se_fc1_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_2_se_fc1_bias_to_fp16"), val = tensor<fp16, [8]>([0x1.41cp-7, 0x1.4e8p-5, 0x1.7ccp-5, 0x1.554p-5, 0x1.d8cp-5, -0x1.34p-11, 0x1.2f8p-5, 0x1.0ap-5])];
128
+ tensor<fp16, [1, 8]> linear_4_cast_fp16 = linear(bias = vad_encoder_encoder_2_se_fc1_bias_to_fp16, weight = vad_encoder_encoder_2_se_fc1_weight_to_fp16, x = input_27_cast_fp16)[name = tensor<string, []>("linear_4_cast_fp16")];
129
+ tensor<fp16, [1, 8]> input_31_cast_fp16 = relu(x = linear_4_cast_fp16)[name = tensor<string, []>("input_31_cast_fp16")];
130
+ tensor<fp16, [64, 8]> vad_encoder_encoder_2_se_fc2_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_2_se_fc2_weight_to_fp16"), val = tensor<fp16, [64, 8]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(90560)))];
131
+ tensor<fp16, [64]> vad_encoder_encoder_2_se_fc2_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_2_se_fc2_bias_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(91648)))];
132
+ tensor<fp16, [1, 64]> linear_5_cast_fp16 = linear(bias = vad_encoder_encoder_2_se_fc2_bias_to_fp16, weight = vad_encoder_encoder_2_se_fc2_weight_to_fp16, x = input_31_cast_fp16)[name = tensor<string, []>("linear_5_cast_fp16")];
133
+ tensor<fp16, [1, 64]> y_9_cast_fp16 = sigmoid(x = linear_5_cast_fp16)[name = tensor<string, []>("y_9_cast_fp16")];
134
+ tensor<int32, [3]> var_160 = const()[name = tensor<string, []>("op_160"), val = tensor<int32, [3]>([1, 64, 1])];
135
+ tensor<fp16, [1, 64, 1]> y_11_cast_fp16 = reshape(shape = var_160, x = y_9_cast_fp16)[name = tensor<string, []>("y_11_cast_fp16")];
136
+ tensor<fp16, [1, 64, 3]> input_35_cast_fp16 = mul(x = x_5_cast_fp16, y = y_11_cast_fp16)[name = tensor<string, []>("input_35_cast_fp16")];
137
+ tensor<fp16, [1, 64, 3]> input_37_cast_fp16 = relu(x = input_35_cast_fp16)[name = tensor<string, []>("input_37_cast_fp16")];
138
+ tensor<string, []> x_7_pad_type_0 = const()[name = tensor<string, []>("x_7_pad_type_0"), val = tensor<string, []>("custom")];
139
+ tensor<int32, [2]> x_7_pad_0 = const()[name = tensor<string, []>("x_7_pad_0"), val = tensor<int32, [2]>([1, 1])];
140
+ tensor<int32, [1]> x_7_strides_0 = const()[name = tensor<string, []>("x_7_strides_0"), val = tensor<int32, [1]>([1])];
141
+ tensor<int32, [1]> x_7_dilations_0 = const()[name = tensor<string, []>("x_7_dilations_0"), val = tensor<int32, [1]>([1])];
142
+ tensor<int32, []> x_7_groups_0 = const()[name = tensor<string, []>("x_7_groups_0"), val = tensor<int32, []>(1)];
143
+ tensor<fp16, [128, 64, 3]> vad_encoder_encoder_3_conv_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [12288]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(91840))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(104192))), name = tensor<string, []>("vad_encoder_encoder_3_conv_weight_to_fp16_palettized"), shape = tensor<uint32, [3]>([128, 64, 3])];
144
+ tensor<fp16, [128]> vad_encoder_encoder_3_conv_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_3_conv_bias_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(104320)))];
145
+ tensor<fp16, [1, 128, 3]> x_7_cast_fp16 = conv(bias = vad_encoder_encoder_3_conv_bias_to_fp16, dilations = x_7_dilations_0, groups = x_7_groups_0, pad = x_7_pad_0, pad_type = x_7_pad_type_0, strides = x_7_strides_0, weight = vad_encoder_encoder_3_conv_weight_to_fp16_palettized, x = input_37_cast_fp16)[name = tensor<string, []>("x_7_cast_fp16")];
146
+ tensor<int32, [1]> reduce_mean_3_axes_0 = const()[name = tensor<string, []>("reduce_mean_3_axes_0"), val = tensor<int32, [1]>([-1])];
147
+ tensor<bool, []> reduce_mean_3_keep_dims_0 = const()[name = tensor<string, []>("reduce_mean_3_keep_dims_0"), val = tensor<bool, []>(true)];
148
+ tensor<fp16, [1, 128, 1]> reduce_mean_3_cast_fp16 = reduce_mean(axes = reduce_mean_3_axes_0, keep_dims = reduce_mean_3_keep_dims_0, x = x_7_cast_fp16)[name = tensor<string, []>("reduce_mean_3_cast_fp16")];
149
+ tensor<int32, []> concat_3_axis_0 = const()[name = tensor<string, []>("concat_3_axis_0"), val = tensor<int32, []>(-1)];
150
+ tensor<bool, []> concat_3_interleave_0 = const()[name = tensor<string, []>("concat_3_interleave_0"), val = tensor<bool, []>(false)];
151
+ tensor<fp16, [1, 128, 1]> concat_3_cast_fp16 = concat(axis = concat_3_axis_0, interleave = concat_3_interleave_0, values = reduce_mean_3_cast_fp16)[name = tensor<string, []>("concat_3_cast_fp16")];
152
+ tensor<int32, [2]> var_179 = const()[name = tensor<string, []>("op_179"), val = tensor<int32, [2]>([1, 128])];
153
+ tensor<fp16, [1, 128]> input_39_cast_fp16 = reshape(shape = var_179, x = concat_3_cast_fp16)[name = tensor<string, []>("input_39_cast_fp16")];
154
+ tensor<fp16, [16, 128]> vad_encoder_encoder_3_se_fc1_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_3_se_fc1_weight_to_fp16"), val = tensor<fp16, [16, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(104640)))];
155
+ tensor<fp16, [16]> vad_encoder_encoder_3_se_fc1_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_3_se_fc1_bias_to_fp16"), val = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(108800)))];
156
+ tensor<fp16, [1, 16]> linear_6_cast_fp16 = linear(bias = vad_encoder_encoder_3_se_fc1_bias_to_fp16, weight = vad_encoder_encoder_3_se_fc1_weight_to_fp16, x = input_39_cast_fp16)[name = tensor<string, []>("linear_6_cast_fp16")];
157
+ tensor<fp16, [1, 16]> input_43_cast_fp16 = relu(x = linear_6_cast_fp16)[name = tensor<string, []>("input_43_cast_fp16")];
158
+ tensor<fp16, [128, 16]> vad_encoder_encoder_3_se_fc2_weight_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_3_se_fc2_weight_to_fp16"), val = tensor<fp16, [128, 16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(108928)))];
159
+ tensor<fp16, [128]> vad_encoder_encoder_3_se_fc2_bias_to_fp16 = const()[name = tensor<string, []>("vad_encoder_encoder_3_se_fc2_bias_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(113088)))];
160
+ tensor<fp16, [1, 128]> linear_7_cast_fp16 = linear(bias = vad_encoder_encoder_3_se_fc2_bias_to_fp16, weight = vad_encoder_encoder_3_se_fc2_weight_to_fp16, x = input_43_cast_fp16)[name = tensor<string, []>("linear_7_cast_fp16")];
161
+ tensor<fp16, [1, 128]> y_13_cast_fp16 = sigmoid(x = linear_7_cast_fp16)[name = tensor<string, []>("y_13_cast_fp16")];
162
+ tensor<int32, [3]> var_189 = const()[name = tensor<string, []>("op_189"), val = tensor<int32, [3]>([1, 128, 1])];
163
+ tensor<fp16, [1, 128, 1]> y_cast_fp16 = reshape(shape = var_189, x = y_13_cast_fp16)[name = tensor<string, []>("y_cast_fp16")];
164
+ tensor<fp16, [1, 128, 3]> input_47_cast_fp16 = mul(x = x_7_cast_fp16, y = y_cast_fp16)[name = tensor<string, []>("input_47_cast_fp16")];
165
+ tensor<fp16, [1, 128, 3]> x_9_cast_fp16 = relu(x = input_47_cast_fp16)[name = tensor<string, []>("x_9_cast_fp16")];
166
+ tensor<int32, [1]> x_11_axes_0 = const()[name = tensor<string, []>("x_11_axes_0"), val = tensor<int32, [1]>([2])];
167
+ tensor<bool, []> x_11_keep_dims_0 = const()[name = tensor<string, []>("x_11_keep_dims_0"), val = tensor<bool, []>(true)];
168
+ tensor<fp16, [1, 128, 1]> x_11_cast_fp16 = reduce_mean(axes = x_11_axes_0, keep_dims = x_11_keep_dims_0, x = x_9_cast_fp16)[name = tensor<string, []>("x_11_cast_fp16")];
169
+ tensor<int32, [3]> transpose_6_perm_0 = const()[name = tensor<string, []>("transpose_6_perm_0"), val = tensor<int32, [3]>([2, 0, 1])];
170
+ tensor<string, []> transpose_6_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("transpose_6_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
171
+ tensor<fp32, [512]> concat_4 = const()[name = tensor<string, []>("concat_4"), val = tensor<fp32, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(113408)))];
172
+ tensor<fp32, [512, 128]> concat_5_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [32768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(115520))), lut = tensor<fp32, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(148352))), name = tensor<string, []>("concat_5_palettized"), shape = tensor<uint32, [2]>([512, 128])];
173
+ tensor<fp32, [512, 128]> concat_6_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [32768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(148480))), lut = tensor<fp32, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(181312))), name = tensor<string, []>("concat_6_palettized"), shape = tensor<uint32, [2]>([512, 128])];
174
+ tensor<fp32, [1, 128]> input_49_batch_first_lstm_h0_squeeze = const()[name = tensor<string, []>("input_49_batch_first_lstm_h0_squeeze"), val = tensor<fp32, [1, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(181440)))];
175
+ tensor<string, []> input_49_batch_first_direction_0 = const()[name = tensor<string, []>("input_49_batch_first_direction_0"), val = tensor<string, []>("forward")];
176
+ tensor<bool, []> input_49_batch_first_output_sequence_0 = const()[name = tensor<string, []>("input_49_batch_first_output_sequence_0"), val = tensor<bool, []>(true)];
177
+ tensor<string, []> input_49_batch_first_recurrent_activation_0 = const()[name = tensor<string, []>("input_49_batch_first_recurrent_activation_0"), val = tensor<string, []>("sigmoid")];
178
+ tensor<string, []> input_49_batch_first_cell_activation_0 = const()[name = tensor<string, []>("input_49_batch_first_cell_activation_0"), val = tensor<string, []>("tanh")];
179
+ tensor<string, []> input_49_batch_first_activation_0 = const()[name = tensor<string, []>("input_49_batch_first_activation_0"), val = tensor<string, []>("tanh")];
180
+ tensor<fp16, [1, 1, 128]> transpose_6_cast_fp16 = transpose(perm = transpose_6_perm_0, x = x_11_cast_fp16)[name = tensor<string, []>("transpose_2")];
181
+ tensor<fp32, [1, 1, 128]> transpose_6_cast_fp16_to_fp32 = cast(dtype = transpose_6_cast_fp16_to_fp32_dtype_0, x = transpose_6_cast_fp16)[name = tensor<string, []>("cast_2")];
182
+ tensor<fp32, [1, 1, 128]> input_49_batch_first_0, tensor<fp32, [1, 128]> input_49_batch_first_1, tensor<fp32, [1, 128]> input_49_batch_first_2 = lstm(activation = input_49_batch_first_activation_0, bias = concat_4, cell_activation = input_49_batch_first_cell_activation_0, direction = input_49_batch_first_direction_0, initial_c = input_49_batch_first_lstm_h0_squeeze, initial_h = input_49_batch_first_lstm_h0_squeeze, output_sequence = input_49_batch_first_output_sequence_0, recurrent_activation = input_49_batch_first_recurrent_activation_0, weight_hh = concat_6_palettized, weight_ih = concat_5_palettized, x = transpose_6_cast_fp16_to_fp32)[name = tensor<string, []>("input_49_batch_first")];
183
+ tensor<int32, [3]> input_49_perm_0 = const()[name = tensor<string, []>("input_49_perm_0"), val = tensor<int32, [3]>([1, 0, 2])];
184
+ tensor<string, []> input_49_batch_first_0_to_fp16_dtype_0 = const()[name = tensor<string, []>("input_49_batch_first_0_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
185
+ tensor<int32, [1]> var_216_axes_0 = const()[name = tensor<string, []>("op_216_axes_0"), val = tensor<int32, [1]>([-1])];
186
+ tensor<fp16, [128]> vad_decoder_layer_norm_weight_to_fp16 = const()[name = tensor<string, []>("vad_decoder_layer_norm_weight_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(182016)))];
187
+ tensor<fp16, [128]> vad_decoder_layer_norm_bias_to_fp16 = const()[name = tensor<string, []>("vad_decoder_layer_norm_bias_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(182336)))];
188
+ tensor<fp16, []> var_5_to_fp16 = const()[name = tensor<string, []>("op_5_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
189
+ tensor<fp16, [1, 1, 128]> input_49_batch_first_0_to_fp16 = cast(dtype = input_49_batch_first_0_to_fp16_dtype_0, x = input_49_batch_first_0)[name = tensor<string, []>("cast_1")];
190
+ tensor<fp16, [1, 1, 128]> input_49_cast_fp16 = transpose(perm = input_49_perm_0, x = input_49_batch_first_0_to_fp16)[name = tensor<string, []>("transpose_1")];
191
+ tensor<fp16, [1, 1, 128]> var_216_cast_fp16 = layer_norm(axes = var_216_axes_0, beta = vad_decoder_layer_norm_bias_to_fp16, epsilon = var_5_to_fp16, gamma = vad_decoder_layer_norm_weight_to_fp16, x = input_49_cast_fp16)[name = tensor<string, []>("op_216_cast_fp16")];
192
+ tensor<fp16, []> var_217_to_fp16 = const()[name = tensor<string, []>("op_217_to_fp16"), val = tensor<fp16, []>(0x1.334p-3)];
193
+ tensor<fp16, [1, 1, 128]> x_cast_fp16 = mul(x = var_216_cast_fp16, y = var_217_to_fp16)[name = tensor<string, []>("x_cast_fp16")];
194
+ tensor<int32, [3]> input_51_perm_0 = const()[name = tensor<string, []>("input_51_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
195
+ tensor<fp16, [1, 128, 1]> input_51_cast_fp16 = transpose(perm = input_51_perm_0, x = x_cast_fp16)[name = tensor<string, []>("transpose_0")];
196
+ tensor<fp16, [1, 128, 1]> input_55_cast_fp16 = relu(x = input_51_cast_fp16)[name = tensor<string, []>("input_55_cast_fp16")];
197
+ tensor<string, []> input_pad_type_0 = const()[name = tensor<string, []>("input_pad_type_0"), val = tensor<string, []>("valid")];
198
+ tensor<int32, [1]> input_strides_0 = const()[name = tensor<string, []>("input_strides_0"), val = tensor<int32, [1]>([1])];
199
+ tensor<int32, [2]> input_pad_0 = const()[name = tensor<string, []>("input_pad_0"), val = tensor<int32, [2]>([0, 0])];
200
+ tensor<int32, [1]> input_dilations_0 = const()[name = tensor<string, []>("input_dilations_0"), val = tensor<int32, [1]>([1])];
201
+ tensor<int32, []> input_groups_0 = const()[name = tensor<string, []>("input_groups_0"), val = tensor<int32, []>(1)];
202
+ tensor<fp16, [1, 128, 1]> vad_decoder_conv_weight_to_fp16 = const()[name = tensor<string, []>("vad_decoder_conv_weight_to_fp16"), val = tensor<fp16, [1, 128, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(182656)))];
203
+ tensor<fp16, [1]> vad_decoder_conv_bias_to_fp16 = const()[name = tensor<string, []>("vad_decoder_conv_bias_to_fp16"), val = tensor<fp16, [1]>([0x1.dfp-5])];
204
+ tensor<fp16, [1, 1, 1]> input_cast_fp16 = conv(bias = vad_decoder_conv_bias_to_fp16, dilations = input_dilations_0, groups = input_groups_0, pad = input_pad_0, pad_type = input_pad_type_0, strides = input_strides_0, weight = vad_decoder_conv_weight_to_fp16, x = input_55_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
205
+ tensor<fp16, [1, 1, 1]> var_229_cast_fp16 = sigmoid(x = input_cast_fp16)[name = tensor<string, []>("op_229_cast_fp16")];
206
+ tensor<int32, [1]> var_230_axes_0 = const()[name = tensor<string, []>("op_230_axes_0"), val = tensor<int32, [1]>([-1])];
207
+ tensor<fp16, [1, 1]> var_230_cast_fp16 = squeeze(axes = var_230_axes_0, x = var_229_cast_fp16)[name = tensor<string, []>("op_230_cast_fp16")];
208
+ tensor<string, []> var_230_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_230_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
209
+ tensor<fp32, [1, 1]> vad_probability = cast(dtype = var_230_cast_fp16_to_fp32_dtype_0, x = var_230_cast_fp16)[name = tensor<string, []>("cast_0")];
210
+ } -> (vad_probability);
211
+ }
silero_vad_se_trained_4bit.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f3db1305052551cd81139f0a0de3d5555fa42de357164888f470e2547bb120c
3
+ size 182976