Speech-Arena-2025 committed on
Commit
5ecb433
·
verified ·
1 Parent(s): 66751c3

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -8,8 +8,6 @@
8
  *.h5 filter=lfs diff=lfs merge=lfs -text
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.lz4 filter=lfs diff=lfs merge=lfs -text
12
- *.mds filter=lfs diff=lfs merge=lfs -text
13
  *.mlmodel filter=lfs diff=lfs merge=lfs -text
14
  *.model filter=lfs diff=lfs merge=lfs -text
15
  *.msgpack filter=lfs diff=lfs merge=lfs -text
@@ -35,25 +33,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
35
  *.zip filter=lfs diff=lfs merge=lfs -text
36
  *.zst filter=lfs diff=lfs merge=lfs -text
37
  *tfevents* filter=lfs diff=lfs merge=lfs -text
38
- # Audio files - uncompressed
39
- *.pcm filter=lfs diff=lfs merge=lfs -text
40
- *.sam filter=lfs diff=lfs merge=lfs -text
41
- *.raw filter=lfs diff=lfs merge=lfs -text
42
- # Audio files - compressed
43
- *.aac filter=lfs diff=lfs merge=lfs -text
44
- *.flac filter=lfs diff=lfs merge=lfs -text
45
- *.mp3 filter=lfs diff=lfs merge=lfs -text
46
- *.ogg filter=lfs diff=lfs merge=lfs -text
47
- *.wav filter=lfs diff=lfs merge=lfs -text
48
- # Image files - uncompressed
49
- *.bmp filter=lfs diff=lfs merge=lfs -text
50
- *.gif filter=lfs diff=lfs merge=lfs -text
51
- *.png filter=lfs diff=lfs merge=lfs -text
52
- *.tiff filter=lfs diff=lfs merge=lfs -text
53
- # Image files - compressed
54
- *.jpg filter=lfs diff=lfs merge=lfs -text
55
- *.jpeg filter=lfs diff=lfs merge=lfs -text
56
- *.webp filter=lfs diff=lfs merge=lfs -text
57
- # Video files - compressed
58
- *.mp4 filter=lfs diff=lfs merge=lfs -text
59
- *.webm filter=lfs diff=lfs merge=lfs -text
 
8
  *.h5 filter=lfs diff=lfs merge=lfs -text
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
 
 
11
  *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
  *.model filter=lfs diff=lfs merge=lfs -text
13
  *.msgpack filter=lfs diff=lfs merge=lfs -text
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: DF Arena Test
3
+ emoji: 📉
4
+ colorFrom: yellow
5
+ colorTo: purple
6
+ sdk: gradio
7
+ sdk_version: 5.14.0
8
+ app_file: app.py
9
+ pinned: false
10
+ ---
11
+
12
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+
4
+ import pandas as pd
5
+ import gradio as gr
6
+ from ui.leaderboard import render_leader_board, render_info_html
7
+ from ui.df_arena_tool import render_tool_info
8
+ from ui.submission import render_submission_page
9
+ import os
10
+ from utils import load_leaderboard
11
+ from huggingface_hub import snapshot_download
12
+ import gradio as gr
13
+ import os
14
+ import json
15
+
16
+ REPO_ID = os.getenv('REPO_ID')
17
+ DB_ERR_PATH = f'./data/data/leaderboard_err.csv'
18
+ DB_ACCURACY_PATH = f'./data/data/leaderboard_accuracy.csv'
19
+ CITATIONS_PATH = f'./data/data/model_citations.json'
20
+
21
+ if not os.path.exists('./data/data'):
22
+ snapshot_download(repo_id=REPO_ID,
23
+ repo_type="dataset", local_dir='./data/data')
24
+
25
+
26
+ with open(CITATIONS_PATH, 'r') as f:
27
+ model_citations = json.load(f)
28
+
29
+ # Load leaderboard data
30
+ leaderboard_df_err = load_leaderboard(DB_ERR_PATH)
31
+ leaderboard_df_accuracy = load_leaderboard(DB_ACCURACY_PATH)
32
+
33
+ # Function to load leaderboard data
34
+
35
+ custom_css = """
36
+ h1, {
37
+ font-size: 50px !important; /* Increase heading sizes */
38
+ line-height: 2.0 !important; /* Increase line spacing */
39
+ text-align: center !important; /* Center align headings */
40
+ }
41
+
42
+ .gradio-container {
43
+ padding: 30px !important; /* Increase padding around the UI */
44
+ }
45
+
46
+ .markdown-body p {
47
+ font-size: 30px !important; /* Increase text size */
48
+ line-height: 2.0 !important; /* More space between lines */
49
+ }
50
+
51
+ .gradio-container .gr-block {
52
+ margin-bottom: 20px !important; /* Add more space between elements */
53
+ }
54
+ """
55
+
56
+ # Gradio Interface Configuration
57
+ def create_ui():
58
+ with gr.Blocks(theme=gr.themes.Soft(text_size=gr.themes.sizes.text_lg), css=custom_css) as demo:
59
+ # gr.Markdown("# Speech Deep Fake Arena")
60
+ gr.Image('/data/code/DF_arena_leaderboard/leaderboard/data/df_arena.jpg')
61
+
62
+ with gr.Tabs():
63
+ with gr.Tab("🏆 Leaderboard"):
64
+ with gr.Column():
65
+ render_info_html()
66
+ gr.Markdown("Table for Equal Error Rate (EER %) for different systems")
67
+ render_leader_board(leaderboard_df_err, model_citations) # Adjust this to work with Gradio components
68
+ gr.Markdown("Table for Accuracy (EER %) for different systems")
69
+
70
+ render_leader_board(leaderboard_df_accuracy, model_citations)
71
+
72
+ with gr.Tab("🛠 Evaluation"):
73
+ render_tool_info()
74
+ with gr.Tab("📤 Submission"):
75
+ render_submission_page()
76
+
77
+ return demo
78
+
79
+
80
+ # Launch the app
81
+ create_ui().launch()
ui/__pycache__/df_arena_tool.cpython-39.pyc ADDED
Binary file (1.27 kB). View file
 
ui/__pycache__/leaderboard.cpython-39.pyc ADDED
Binary file (2.48 kB). View file
 
ui/__pycache__/submission.cpython-39.pyc ADDED
Binary file (900 Bytes). View file
 
ui/df_arena_tool.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
+ def render_tool_info():
4
+ text = """
5
+ In order to streamline the evaluation process across many models and datasets, we
6
+ have developed df_arena_toolkit which can be used to compute score files for evaluation.
7
+ The tool can be found at https://github.com/hoanmyTran/deepfake_arena/tree/main.
8
+
9
+ ### Usage
10
+ #### 1. Data Preparation
11
+ Create metadata.csv with below format:
12
+ ```
13
+ file_name,label
14
+ /path/to/audio1,spoof
15
+ /path/to/audio2,bonafide
16
+ ...
17
+
18
+ ```
19
+ NOTE : The labels should contain "spoof" for spoofed samples and "bonafide" for real samples.
20
+ All the file_name paths should be absolute
21
+
22
+ #### 2. Evaluation
23
+
24
+ Example usage :
25
+ ```py
26
+ python evaluation.py --model_name wavlm_ecapa
27
+ --batch_size 32
28
+ --protocol_file_path /path/to/metadata.csv
29
+ --model_path /path/to/model.ckpt
30
+ --out_score_file_name scores.txt
31
+ --transforms pad
32
+ --num workers 4
33
+ ```
34
+
35
+
36
+
37
+
38
+ """
39
+ return gr.Markdown(text)
ui/leaderboard.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import pandas as pd
3
+ import gradio as gr
4
+ from utils import load_leaderboard
5
+ import numpy as np
6
+ from huggingface_hub import snapshot_download
7
+
8
+
9
+ def make_clickable(url, name):
10
+ return f'<a href="{url}" target="_blank">{name}</a>'
11
+
12
+ def render_info_html():
13
+ info_text = "With the growing advent of machine-generated speech, the scientific community is responding with exciting resources " \
14
+ "to detect deep fakes. With research moving at such a rapid pace, it becomes challenging to keep track of generalizability " \
15
+ "of SOTA DF detection systems. This leaderboard thus presents a comprehensive benchmark of 10 SOTA speech antispoofing " \
16
+ "systems across 13 popular speech deep fake detection datasets."
17
+
18
+ # HTML formatted info text
19
+ return gr.Markdown(info_text)
20
+
21
+ def highlight_min(s, props=''):
22
+ return np.where(s == np.nanmin(s.values), props, '')
23
+
24
+ def render_leader_board(leaderboard_df, model_citations):
25
+
26
+ if not leaderboard_df.empty:
27
+ # Convert model names and dataset names to clickable links
28
+ # leaderboard_df['System'] = leaderboard_df['System'].apply(lambda x: make_clickable(model_citations.get(x, "#"), x))
29
+ # leaderboard_df['Average'] = leaderboard_df.loc[:, ['In-the-wild','ASVSpoof2019','ASVSpoof2021LA','ASVSpoof2021DF','ASVSpoof2024-Dev','ASVSpoof2024-Eval','FakeOrReal','codecfake3','ADD2022','ADD2023','DFADD','LibriVoc','SONAR']].mean(axis=1)
30
+ leaderboard_df.insert(1, 'Average', leaderboard_df.loc[:, ['In-the-wild','ASVSpoof2019','ASVSpoof2021LA','ASVSpoof2021DF','ASVSpoof2024-Dev','ASVSpoof2024-Eval','FakeOrReal','codecfake3','ADD2022','ADD2023','DFADD','LibriVoc','SONAR']].mean(axis=1))
31
+
32
+ leaderboard_df = leaderboard_df.sort_values(by="Average", ascending=True).reset_index(drop=True)
33
+
34
+ # Assign rank emojis 🥇🥈🥉
35
+ leaderboard_df["System"] = leaderboard_df["System"].apply(lambda x: f"[{x}]({model_citations.get(x, '#')})")
36
+
37
+ emojis = ["🥇", "🥈", "🥉"]
38
+
39
+ leaderboard_df.System[0] = f"{emojis[0]} {leaderboard_df.System[0]}"
40
+ leaderboard_df.System[1] = f"{emojis[1]} {leaderboard_df.System[1]}"
41
+ leaderboard_df.System[2] = f"{emojis[2]} {leaderboard_df.System[2]}"
42
+ temp = leaderboard_df.loc[:, ['System', 'Training Data', 'Num Parameters', 'Data Augmentation']]
43
+ # styler = leaderboard_df.drop(columns=['Training Data', 'Num Parameters', 'Data Augmentation'], axis=1).style.highlight_min(color = 'lightgreen', axis = 0).format(precision=2)
44
+ styler = (
45
+ leaderboard_df
46
+ .drop(columns=['Training Data', 'Num Parameters', 'Data Augmentation'], axis=1).style \
47
+ .format(precision=2))
48
+ styler.apply(highlight_min, props='color:green', axis=0)
49
+
50
+ # styler['System'] = temp['System']
51
+ # styler['Training Data'] = temp['Training Data']
52
+ # styler['Num Parameters'] = temp['Num Parameters']
53
+ # styler['Data Augmentation'] = temp['Data Augmentation']
54
+ return gr.Dataframe(styler, datatype=['markdown'] * 1 + ['number'] * 14)
55
+ return gr.HTML(value="<p>No data available in the leaderboard.</p>")
ui/submission.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
+ def render_submission_page():
4
+ text = """ Want to submit your own system to the leaderboard? Submit
5
+ all the scores files for your system across evaluation sets of the supported datasets at
6
+ <speech.arena.eval@gmail.com> and we will handle the rest. Ensure the filenames are named as follows for each datasets:
7
+
8
+ - asvspoof_2019.txt
9
+ - asvspoof_2021_df_eval.txt
10
+ - asvspoof_2021_la_eval.txt
11
+ - asvspoof_2024_dev.txt
12
+ - asvspoof_2024_eval.txt
13
+ - codecfake.txt
14
+ - fake_or_real.txt
15
+ - in_the_wild.txt
16
+ - sonar.txt
17
+ - dfadd.txt
18
+ - libri_voc.txt
19
+ - add_2022.txt
20
+ - add_2023.txt
21
+ """
22
+ return gr.Markdown(text)
23
+
24
+
25
+
utils.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ import sqlite3
2
+ import pandas as pd
3
+ import streamlit as st
4
+
5
+ def load_leaderboard(db_path):
6
+ df = pd.read_csv(db_path) # Update table name if needed
7
+
8
+ return df