forwarder1121 committed on
Commit
1dce8fe
·
verified ·
1 Parent(s): 71a81a5

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +37 -38
README.md CHANGED
@@ -84,51 +84,50 @@ Two-class stress probability:
84
 
85
  ## How to Use
86
 
87
- Below is a self-contained example that dynamically downloads both the model code (`models.py`) and the weights from the Hub:
88
 
89
  ```python
90
  from huggingface_hub import hf_hub_download
91
  import importlib.util
 
92
  import torch
93
  import torch.nn.functional as F
94
 
95
- # 1) Download and load the model definition
96
- code_path = hf_hub_download(
97
- repo_id="forwarder1121/voice-based-stress-recognition",
98
- filename="models.py"
99
- )
100
- spec = importlib.util.spec_from_file_location("models", code_path)
101
- models = importlib.util.module_from_spec(spec)
102
- spec.loader.exec_module(models)
103
- StudentNet = models.StudentNet
104
-
105
- # 2) Download the checkpoint
106
- ckpt_path = hf_hub_download(
107
- repo_id="forwarder1121/voice-based-stress-recognition",
108
- filename="pytorch_model.bin"
109
- )
110
-
111
- # 3) Instantiate and load weights
112
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
113
- model = StudentNet().to(device)
114
-
115
- state = torch.load(ckpt_path, map_location=device, weights_only=True)
116
- model.load_state_dict(state)
117
- model.eval()
118
-
119
- # ────── Create x_w2v here ──────
120
- # In real use, you would insert your preprocessed W2V embedding.
121
- # For testing, we create a random tensor of shape (1, 512).
122
- x_w2v = torch.randn(1, 512, device=device)
123
-
124
- # 4) Inference
125
- with torch.no_grad():
126
- logits, _ = model(x_w2v)
127
- probs = F.softmax(logits, dim=1)
128
- stress_p = (probs[0, 1] * 100).item() # Probability in [0, 100]
129
-
130
- print(f"Stress probability: {stress_p:.2f}%")
131
-
132
  ```
133
 
134
  ## Citation
 
84
 
85
  ## How to Use
86
 
87
+ Below is a self-contained example that dynamically downloads both the model code (`models.py`) and the weights from the Hub, then runs inference via the Hugging Face Transformers API—all in one script:
88
 
89
  ```python
90
  from huggingface_hub import hf_hub_download
91
  import importlib.util
92
+ from transformers import AutoConfig, AutoModelForAudioClassification
93
  import torch
94
  import torch.nn.functional as F
95
 
96
+ def main():
97
+ repo = "forwarder1121/voice-based-stress-recognition"
98
+
99
+ # 1) Dynamically download & load the custom models.py
100
+ code_path = hf_hub_download(repo_id=repo, filename="models.py")
101
+ spec = importlib.util.spec_from_file_location("models", code_path)
102
+ models = importlib.util.module_from_spec(spec)
103
+ spec.loader.exec_module(models)
104
+ # now we have models.StudentForAudioClassification and models.StressConfig
105
+
106
+ # 2) Load config & model via Transformers (with remote code trust)
107
+ cfg = AutoConfig.from_pretrained(repo, trust_remote_code=True)
108
+ model = AutoModelForAudioClassification.from_pretrained(
109
+ repo,
110
+ trust_remote_code=True,
111
+ torch_dtype="auto"
112
+ )
113
+ model.eval()
114
+
115
+ # 3) Prepare a dummy W2V embedding for testing
116
+ # In real use, replace this with your (1, 512) pre-computed W2V tensor.
117
+ batch_size = 1
118
+ DIM_W2V = 512
119
+ x_w2v = torch.randn(batch_size, DIM_W2V, dtype=next(model.parameters()).dtype)
120
+
121
+ # 4) Inference
122
+ with torch.no_grad():
123
+ outputs = model(x_w2v) # SequenceClassifierOutput
124
+ probs = F.softmax(outputs.logits, dim=-1)
125
+
126
+ print(f"Not stressed: {probs[0,0]*100:.1f}%")
127
+ print(f"Stressed : {probs[0,1]*100:.1f}%")
128
+
129
+ if __name__ == "__main__":
130
+ main()
 
 
131
  ```
132
 
133
  ## Citation