File size: 4,746 Bytes
40b3335
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
#!/usr/bin/env python3
"""
Script to deploy the Watermark Leaderboard to Hugging Face Spaces
"""

import os
import shutil
import json
from pathlib import Path

def copy_files_to_hf_directory():
    """Stage the deployment files for a Hugging Face Space in this directory.

    Pulls the app code, requirements, README and leaderboard data from the
    parent project directory, plus the Reproducibility folder when present.
    Missing files produce a warning rather than aborting the run.
    """
    project_root = Path("../")
    target = Path(".")

    # Replace any stale Reproducibility copy with a fresh one.
    repro_src = project_root / "Reproducibility"
    if repro_src.exists():
        repro_dst = target / "Reproducibility"
        if repro_dst.exists():
            shutil.rmtree(repro_dst)
        shutil.copytree(repro_src, repro_dst)
        print("βœ… Copied Reproducibility folder")

    # Copy each essential deployment file individually so one missing
    # file only warns instead of stopping the whole staging step.
    for file_name in ("app.py", "requirements.txt", "README.md", "leaderboard.json"):
        src = project_root / file_name
        if not src.exists():
            print(f"⚠️  {file_name} not found in source directory")
            continue
        shutil.copy2(src, target / file_name)
        print(f"βœ… Copied {file_name}")

    print("\nπŸŽ‰ Files copied successfully!")
    print("\nNext steps:")
    print("1. Create a new Hugging Face Space")
    print("2. Upload all files in this directory")
    print("3. Set the Space to use Gradio SDK")
    print("4. Your leaderboard will be live!")

def create_hf_readme():
    """Write the Hugging Face-specific README.md into the current directory.

    The file carries the YAML front matter Spaces requires (SDK, app file,
    license, ...) followed by user-facing documentation for the leaderboard.
    """
    # Front-matter + body emitted verbatim; Spaces parses the leading
    # `---` block to configure the Gradio app.
    readme_content = """---
title: Watermark Leaderboard
emoji: πŸ†
colorFrom: blue
colorTo: green
sdk: gradio
sdk_version: "4.44.0"
app_file: app.py
pinned: false
license: mit
short_description: Interactive leaderboard for watermark performance evaluation
---

# Watermark Leaderboard πŸ†

An interactive leaderboard for comparing watermark performance across different models and evaluation settings.

## Features

- **Interactive Scatter Plot**: Visualize watermark performance with Plotly charts
- **Performance Table**: Detailed metrics with sorting and filtering
- **Multiple Evaluation Settings**: Attack-free, Watermark Removal, and Stealing Attack
- **Model Support**: LLaMA3 and DeepSeek models
- **Dynamic Filtering**: Real-time updates based on model and metric selection
- **Flexible Submissions**: Submit data for any combination of attack types
- **Pending Approval System**: All submissions reviewed before appearing on leaderboard
- **Complete Field Visibility**: Administrators see all submission details for review
- **Professional UI**: Clean, modern interface with accordion sections
- **Reproducibility**: Access to all evaluation codes and guidelines

## How to Use

1. **Select Model**: Choose between LLaMA3 or DeepSeek
2. **Choose Setting**: Pick from Attack-free, Watermark Removal, or Stealing Attack
3. **View Results**: Explore the scatter plot and detailed table
4. **Submit Data**: Click "Add Your Data" to submit new results
   - Submit any combination of attack types (Attack-free, Watermark Removal, Stealing Attack)
   - All submissions go through approval process before appearing on leaderboard
5. **Administrator Review**: Administrators can review pending submissions with full field visibility

## Metrics Explained

- **Normalized Utility ↑**: Higher values indicate better text quality
- **Detection Rate (%) ↑**: Higher values indicate better watermark detection
- **Absolute Utility Degradation ↑**: Higher values indicate better resistance to removal attacks
- **Adversary BERT Score ↑**: Higher values indicate better performance under adversarial conditions

## Contributing

We encourage researchers to contribute their evaluation results. Please follow the guidelines in the "Guidelines" section for submission requirements.

## License

MIT License

---
*Last updated: December 2024*
"""

    Path("README.md").write_text(readme_content, encoding="utf-8")
    print("βœ… Created Hugging Face README.md")

def main():
    """Prepare the Watermark Leaderboard directory for Hugging Face Spaces.

    Copies the project files first and only afterwards writes the Hugging
    Face-specific README. BUGFIX: the previous order was reversed, so
    ``copy_files_to_hf_directory()`` — which copies ``README.md`` from the
    source project — clobbered the HF README that ``create_hf_readme()``
    had just written, deploying the wrong README.
    """
    print("πŸš€ Preparing Watermark Leaderboard for Hugging Face deployment...")

    # Copy files from the parent project (this includes the project README).
    copy_files_to_hf_directory()

    # Write the Spaces-specific README last so the copy step above cannot
    # overwrite it with the generic project README.
    create_hf_readme()

    print("\nπŸ“‹ Deployment Checklist:")
    print("βœ… All files prepared")
    print("βœ… Requirements.txt updated")
    print("βœ… README.md created for Hugging Face")
    print("βœ… Reproducibility codes included")

    print("\n🌐 Ready for Hugging Face Spaces deployment!")

if __name__ == "__main__":
    main()