Update README.md
Browse files
README.md
CHANGED
|
@@ -84,51 +84,50 @@ Two-class stress probability:
|
|
| 84 |
|
| 85 |
Previous version of this section (removed by this commit; several lines were lost in extraction and are marked with ellipses):

## How to Use

Below is a self-contained example that dynamically downloads both the model code (`models.py`) and the weights from the Hub:

```python
from huggingface_hub import hf_hub_download
import importlib.util
import torch
import torch.nn.functional as F

# ... (download/setup code not recovered from the diff) ...
spec = importlib.util.spec_from_file_location("models", code_path)
models = importlib.util.module_from_spec(spec)
spec.loader.exec_module(models)

# 2) ... (model-loading and inference code not recovered from the diff) ...

print(f"Stress probability: {stress_p:.2f}%")
```

## Citation
|
|
|
|
| 84 |
|
| 85 |
## How to Use

Below is a self-contained example that dynamically downloads both the model code (`models.py`) and the weights from the Hub, then runs inference via the Hugging Face Transformers API—all in one script:

```python
from huggingface_hub import hf_hub_download
import importlib.util
from transformers import AutoConfig, AutoModelForAudioClassification
import torch
import torch.nn.functional as F

def main():
    repo = "forwarder1121/voice-based-stress-recognition"

    # 1) Dynamically download & load the custom models.py
    code_path = hf_hub_download(repo_id=repo, filename="models.py")
    spec = importlib.util.spec_from_file_location("models", code_path)
    models = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(models)
    # now we have models.StudentForAudioClassification and models.StressConfig

    # 2) Load config & model via Transformers (with remote code trust)
    cfg = AutoConfig.from_pretrained(repo, trust_remote_code=True)
    model = AutoModelForAudioClassification.from_pretrained(
        repo,
        trust_remote_code=True,
        torch_dtype="auto"
    )
    model.eval()

    # 3) Prepare a dummy W2V embedding for testing
    # In real use, replace this with your (1, 512) pre-computed W2V tensor.
    batch_size = 1
    DIM_W2V = 512
    x_w2v = torch.randn(batch_size, DIM_W2V, dtype=next(model.parameters()).dtype)

    # 4) Inference
    with torch.no_grad():
        outputs = model(x_w2v)  # SequenceClassifierOutput
        probs = F.softmax(outputs.logits, dim=-1)

    print(f"Not stressed: {probs[0,0]*100:.1f}%")
    print(f"Stressed    : {probs[0,1]*100:.1f}%")

if __name__ == "__main__":
    main()
```

## Citation
|