karenlu653 committed on
Commit
a0cfc69
·
verified ·
1 Parent(s): 9d67c3e

Upload folder using huggingface_hub

Browse files
Files changed (5) hide show
  1. README.md +62 -3
  2. config.json +13 -0
  3. label_mapping.json +4 -0
  4. model.safetensors +3 -0
  5. preprocessor_config.json +10 -0
README.md CHANGED
@@ -1,3 +1,62 @@
1
- ---
2
- license: mit
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # shanghai-binary
2
+
3
+ Binary classifier: **Shanghai** vs **Not-Shanghai** (audio FBANK → GRU → MLP).
4
+
5
+ ## Files
6
+ - `model.safetensors` — PyTorch weights (safetensors)
7
+ - `config.json` — model architecture
8
+ - `preprocessor_config.json` — audio feature extraction settings
9
+ - `label_mapping.json` — index → label
10
+
11
+ ## Inference (PyTorch)
12
+ ```python
13
import json
import os

import librosa
import numpy as np
import torch
from safetensors.torch import load_file as load_safetensors

# Load the configuration files shipped with the model repository.
model_dir = "./hf/models/shanghai-binary"

def _read_json(name):
    """Read a JSON file from model_dir, closing the handle promptly."""
    with open(os.path.join(model_dir, name), encoding="utf-8") as f:
        return json.load(f)

cfg = _read_json("config.json")               # architecture hyperparameters
pp = _read_json("preprocessor_config.json")   # feature-extraction settings
lm = _read_json("label_mapping.json")         # class index -> label name
22
+
23
+ # Define the model class you trained (LanNetBinary)
24
+ # (Same as in your training notebook)
25
class LanNetBinary(torch.nn.Module):
    """GRU encoder over FBANK frames with a two-layer linear head.

    The input is a batch of feature sequences of shape
    (batch, time, input_dim). The GRU output is pooled by keeping only
    the final timestep, then projected hidden_dim -> 192 -> 2 to yield
    binary classification logits.

    Note: attribute names (gru, linear2, linear3) are fixed — they are
    the keys of the released state_dict.
    """

    def __init__(self, input_dim=40, hidden_dim=512, num_layers=2):
        super().__init__()
        self.gru = torch.nn.GRU(
            input_dim, hidden_dim, num_layers=num_layers, batch_first=True
        )
        self.linear2 = torch.nn.Linear(hidden_dim, 192)
        self.linear3 = torch.nn.Linear(192, 2)

    def forward(self, x):
        # sequence_out: (batch, time, hidden_dim); hidden state is unused.
        sequence_out, _hidden = self.gru(x)
        # Last-timestep pooling, then the MLP head.
        pooled = sequence_out[:, -1, :]
        return self.linear3(self.linear2(pooled))
37
+
38
# Load weights: build the module from the published config, then fill it
# with the safetensors checkpoint.
model = LanNetBinary(cfg["input_dim"], cfg["hidden_dim"], cfg["num_layers"])
sd = load_safetensors(os.path.join(model_dir, "model.safetensors"))
# strict=True: every checkpoint key must match the module's state_dict exactly.
model.load_state_dict(sd, strict=True)
# Switch to inference mode (disables training-only behavior).
model.eval()
43
+
44
# Feature extraction must match preprocessor_config.json so inference
# features agree with what the model saw during training.
def fbanks_from_array(y, sr=pp["sampling_rate"], n_mels=pp["n_mels"], n_fft=pp["n_fft"], hop_length=pp["hop_length"], max_len=pp["max_len_frames"]):
    """Convert a mono waveform to a fixed-length log-mel (FBANK) tensor.

    Args:
        y: 1-D waveform array sampled at ``sr`` Hz.
        sr, n_mels, n_fft, hop_length: mel-spectrogram settings; the
            defaults mirror ``preprocessor_config.json``.
        max_len: frame count the output is padded or truncated to.

    Returns:
        torch.FloatTensor of shape (1, max_len, n_mels).
    """
    mel = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=n_mels, n_fft=n_fft, hop_length=hop_length, power=2.0)
    fbanks = librosa.power_to_db(mel).T  # (frames, n_mels), log-dB scale
    n_frames = fbanks.shape[0]
    if n_frames < max_len:
        # Zero-pad short clips at the end; np is imported at script top,
        # so no per-call import is needed here.
        fbanks = np.pad(fbanks, ((0, max_len - n_frames), (0, 0)), mode="constant")
    else:
        fbanks = fbanks[:max_len, :]
    return torch.tensor(fbanks, dtype=torch.float32).unsqueeze(0)  # (1, T, F)
55
+
56
+ # Example: predict from a waveform array "y" at 16kHz
57
+ # y, _ = librosa.load("example.wav", sr=pp["sampling_rate"])
58
+ # x = fbanks_from_array(y)
59
+ # with torch.no_grad():
60
+ # logits = model(x)
61
+ # pred = int(torch.argmax(logits, dim=1))
62
+ # print(lm[str(pred)])
config.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "shanghai-binary",
3
+ "model_type": "gru-audio-binary",
4
+ "input_dim": 40,
5
+ "hidden_dim": 512,
6
+ "num_layers": 2,
7
+ "num_labels": 2,
8
+ "classifier_dims": [
9
+ 192,
10
+ 2
11
+ ],
12
+ "pooling": "last_timestep"
13
+ }
label_mapping.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "0": "not-shanghai",
3
+ "1": "shanghai"
4
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:03a883bf09cf68c11a4a4f5076ad7d827f976fa6475d768101b747a23a59e087
3
+ size 10104032
preprocessor_config.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "feature_type": "fbank",
3
+ "sampling_rate": 16000,
4
+ "n_mels": 40,
5
+ "n_fft": 400,
6
+ "hop_length": 160,
7
+ "max_len_frames": 200,
8
+ "log_db": true,
9
+ "mono": true
10
+ }