davesalvi committed on
Commit
5a773fc
·
1 Parent(s): 0b47152

LCNN all data aug

Browse files
Files changed (2) hide show
  1. .gitignore +2 -1
  2. script.py +64 -63
.gitignore CHANGED
@@ -4,7 +4,7 @@ checkpoints/
4
  debug.sh
5
  data_test/
6
 
7
- .idea
8
 
9
  # Byte-compiled / optimized / DLL files
10
  __pycache__/
@@ -180,3 +180,4 @@ cython_debug/
180
 
181
  # PyPI configuration file
182
  .pypirc
 
 
4
  debug.sh
5
  data_test/
6
 
7
+ .idea/
8
 
9
  # Byte-compiled / optimized / DLL files
10
  __pycache__/
 
180
 
181
  # PyPI configuration file
182
  .pypirc
183
+ /checkpoints/
script.py CHANGED
@@ -2,10 +2,10 @@ import io
2
  import time
3
  import os
4
 
5
- # os.environ['OMP_NUM_THREADS'] = '1'
6
- # os.environ['OPENBLAS_NUM_THREADS'] = '1'
7
- # os.environ['MKL_NUM_THREADS'] = '1'
8
- # os.environ['NUMEXPR_NUM_THREADS'] = '1'
9
 
10
  import torch
11
  import tqdm.auto as tqdm
@@ -51,30 +51,31 @@ print('Define Model')
51
  # model_path = './checkpoints/RAWNET_ASVSPOOF_FOR_INTHEWILD_PURDUE.pth'
52
  # model.load_state_dict(torch.load(model_path, map_location=device))
53
 
54
- # # LCNN MODEL
55
- # model = LCNN(return_emb=False).to(device)
56
- # # model_path = './checkpoints/LCNN_ASVSPOOF_FOR_INTHEWILD_PURDUE.pth'
57
  # model_path = './checkpoints/LCNN_ALL_DATA.pth'
58
- # model.load_state_dict(torch.load(model_path, map_location=device))
59
-
60
- # MOE MODEL
61
- expert_1 = LCNN(return_emb=True).to(device)
62
- expert_2 = LCNN(return_emb=True).to(device)
63
- expert_3 = LCNN(return_emb=True).to(device)
64
- expert_4 = LCNN(return_emb=True).to(device)
65
- expert_5 = LCNN(return_emb=True).to(device)
66
- expert_6 = LCNN(return_emb=True).to(device)
67
 
68
- # # model = UltimateMOE(experts=[expert_1, expert_2, expert_3, expert_4])
69
- # # model_path = './checkpoints/MOE_ULTIMATE.pth'
 
 
 
 
 
70
  #
71
- # model = MOE_attention(experts=[expert_1, expert_2, expert_3, expert_4, expert_5, expert_6], device=device)
72
- # # model_path = './checkpoints/MOE_ATTENTION.pth'
73
- # model_path = './checkpoints/MOE_TRANSF.pth'
74
-
75
- expert_7 = LCNN(return_emb=True).to(device)
76
- model = MOE_attention(experts=[expert_1, expert_2, expert_3, expert_4, expert_5, expert_6, expert_7], device=device, freezing=True)
77
- model_path = './checkpoints/MOE_TRANSF_7EXP.pth'
 
 
 
78
 
79
 
80
  model = (model).to(device)
@@ -102,44 +103,44 @@ for el in tqdm.tqdm(dataset_remote):
102
 
103
  # if you are using libraries that expect a file. You can use BytesIO object
104
 
105
- try:
106
-
107
- # RUNNING ON HUGGINGFACE
108
- file_like = io.BytesIO(el["audio"]["bytes"])
109
- tensor = preprocess(file_like)
110
- # # RUNNING LOCALLY
111
- # tensor = preprocess(el)
112
-
113
- with torch.no_grad():
114
- # soft decision (such as log likelihood score)
115
- # positive score correspond to synthetic prediction
116
- # negative score correspond to pristine prediction
117
-
118
- # # OLD MODEL
119
- # score = model(tensor.to(device)).cpu().item()
120
- # CUSTOM MODEL
121
- score = model(tensor.to(device))[:, 1].cpu()
122
- print(f'SCORE OUT: {score}')
123
- score = score.mean().item()
124
- print(f'SCORE FINAL: {score}')
125
-
126
- # we require a hard decision to be submited. so you need to pick a threshold
127
- pred = "generated" if score > model.threshold else "pristine"
128
-
129
- # append your prediction
130
- # "id" and "pred" are required. "score" will not be used in scoring but we encourage you to include it. We'll use it for analysis of the results
131
-
132
- # RUNNING ON HUGGINGFACE
133
- out.append(dict(id=el["id"], pred=pred, score=score, time=time.time() - start_time))
134
- ## RUNNING LOCALLY
135
- # out.append(dict(id=el, pred=pred, score=score, time=time.time() - start_time))
136
-
137
- except Exception as e:
138
- print(e)
139
- print("failed", el["id"])
140
- out.append(dict(id=el["id"], pred="none", score=None))
141
- # print("failed", el)
142
- # out.append(dict(id=el, pred="none", score=None))
143
 
144
  # save the final result and that's it
145
  pd.DataFrame(out).to_csv("submission.csv", index=False)
 
2
  import time
3
  import os
4
 
5
+ os.environ['OMP_NUM_THREADS'] = '1'
6
+ os.environ['OPENBLAS_NUM_THREADS'] = '1'
7
+ os.environ['MKL_NUM_THREADS'] = '1'
8
+ os.environ['NUMEXPR_NUM_THREADS'] = '1'
9
 
10
  import torch
11
  import tqdm.auto as tqdm
 
51
  # model_path = './checkpoints/RAWNET_ASVSPOOF_FOR_INTHEWILD_PURDUE.pth'
52
  # model.load_state_dict(torch.load(model_path, map_location=device))
53
 
54
+ # LCNN MODEL
55
+ model = LCNN(return_emb=False).to(device)
56
+ # model_path = './checkpoints/LCNN_ASVSPOOF_FOR_INTHEWILD_PURDUE.pth'
57
  # model_path = './checkpoints/LCNN_ALL_DATA.pth'
58
+ model_path = './checkpoints/LCNN_ALL_DATA_AUG.pth'
59
+ model.load_state_dict(torch.load(model_path, map_location=device))
 
 
 
 
 
 
 
60
 
61
+ # # MOE MODEL
62
+ # expert_1 = LCNN(return_emb=True).to(device)
63
+ # expert_2 = LCNN(return_emb=True).to(device)
64
+ # expert_3 = LCNN(return_emb=True).to(device)
65
+ # expert_4 = LCNN(return_emb=True).to(device)
66
+ # expert_5 = LCNN(return_emb=True).to(device)
67
+ # expert_6 = LCNN(return_emb=True).to(device)
68
  #
69
+ # # # model = UltimateMOE(experts=[expert_1, expert_2, expert_3, expert_4])
70
+ # # # model_path = './checkpoints/MOE_ULTIMATE.pth'
71
+ #
72
+ # # model = MOE_attention(experts=[expert_1, expert_2, expert_3, expert_4, expert_5, expert_6], device=device)
73
+ # # # model_path = './checkpoints/MOE_ATTENTION.pth'
74
+ # # model_path = './checkpoints/MOE_TRANSF.pth'
75
+ #
76
+ # expert_7 = LCNN(return_emb=True).to(device)
77
+ # model = MOE_attention(experts=[expert_1, expert_2, expert_3, expert_4, expert_5, expert_6, expert_7], device=device, freezing=True)
78
+ # model_path = './checkpoints/MOE_TRANSF_7EXP.pth'
79
 
80
 
81
  model = (model).to(device)
 
103
 
104
  # if you are using libraries that expect a file. You can use BytesIO object
105
 
106
+ # try:
107
+
108
+ # RUNNING ON HUGGINGFACE
109
+ file_like = io.BytesIO(el["audio"]["bytes"])
110
+ tensor = preprocess(file_like)
111
+ # # RUNNING LOCALLY
112
+ # tensor = preprocess(el)
113
+
114
+ with torch.no_grad():
115
+ # soft decision (such as log likelihood score)
116
+ # positive score correspond to synthetic prediction
117
+ # negative score correspond to pristine prediction
118
+
119
+ # # OLD MODEL
120
+ # score = model(tensor.to(device)).cpu().item()
121
+ # CUSTOM MODEL
122
+ score = model(tensor.to(device))[:, 1].cpu()
123
+ print(f'SCORE OUT: {score}')
124
+ score = score.mean().item()
125
+ print(f'SCORE FINAL: {score}')
126
+
127
+ # we require a hard decision to be submited. so you need to pick a threshold
128
+ pred = "generated" if score > model.threshold else "pristine"
129
+
130
+ # append your prediction
131
+ # "id" and "pred" are required. "score" will not be used in scoring but we encourage you to include it. We'll use it for analysis of the results
132
+
133
+ # RUNNING ON HUGGINGFACE
134
+ out.append(dict(id=el["id"], pred=pred, score=score, time=time.time() - start_time))
135
+ # # RUNNING LOCALLY
136
+ # out.append(dict(id=el, pred=pred, score=score, time=time.time() - start_time))
137
+
138
+ # except Exception as e:
139
+ # print(e)
140
+ # print("failed", el["id"])
141
+ # out.append(dict(id=el["id"], pred="none", score=None))
142
+ # # print("failed", el)
143
+ # # out.append(dict(id=el, pred="none", score=None))
144
 
145
  # save the final result and that's it
146
  pd.DataFrame(out).to_csv("submission.csv", index=False)