Spaces:
Runtime error
Runtime error
Commit ·
bed94b8
1
Parent(s): bb27043
Update space
Browse files- app.py +16 -16
- src/about.py +1 -1
app.py
CHANGED
|
@@ -118,24 +118,24 @@ def create_level_tab(level: int, full_df: pd.DataFrame, cols: list, benchmark_co
|
|
| 118 |
level_df = get_leaderboard_data(level, full_df, cols, benchmark_cols)
|
| 119 |
level_leaderboard = init_leaderboard(level_df)
|
| 120 |
|
| 121 |
-
# 添加导出按钮
|
| 122 |
-
with gr.Row():
|
| 123 |
-
|
| 124 |
-
|
| 125 |
|
| 126 |
-
def export_data():
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
|
| 134 |
-
export_button.click(
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
)
|
| 139 |
|
| 140 |
# 主界面
|
| 141 |
demo = gr.Blocks(css=custom_css)
|
|
|
|
| 118 |
level_df = get_leaderboard_data(level, full_df, cols, benchmark_cols)
|
| 119 |
level_leaderboard = init_leaderboard(level_df)
|
| 120 |
|
| 121 |
+
# # 添加导出按钮
|
| 122 |
+
# with gr.Row():
|
| 123 |
+
# export_button = gr.Button(f"Export Level {level} Data")
|
| 124 |
+
# export_status = gr.Markdown()
|
| 125 |
|
| 126 |
+
# def export_data():
|
| 127 |
+
# try:
|
| 128 |
+
# filename = f"level_{level}_leaderboard.csv"
|
| 129 |
+
# level_df.to_csv(filename, index=False)
|
| 130 |
+
# return f"✅ Data exported to {filename}"
|
| 131 |
+
# except Exception as e:
|
| 132 |
+
# return f"❌ Fail to export: {str(e)}"
|
| 133 |
|
| 134 |
+
# export_button.click(
|
| 135 |
+
# fn=export_data,
|
| 136 |
+
# inputs=[],
|
| 137 |
+
# outputs=[export_status]
|
| 138 |
+
# )
|
| 139 |
|
| 140 |
# 主界面
|
| 141 |
demo = gr.Blocks(css=custom_css)
|
src/about.py
CHANGED
|
@@ -104,7 +104,7 @@ TITLE = """<h1 align="center" id="space-title">SafeLawBench Leaderboard</h1>"""
|
|
| 104 |
|
| 105 |
# What does your leaderboard evaluate?
|
| 106 |
INTRODUCTION_TEXT = """
|
| 107 |
-
We introduced SafeLawBench, a three-tiered safety evaluation benchmark developed from hierarchical clustering of real-world legal materials. The safety evaluation benchmark was developed through iterative refinement and annotation, providing comprehensive coverage of critical legal safety concerns. According to the severity of legal safety, we divided our tasks into four ranks, including
|
| 108 |
"""
|
| 109 |
|
| 110 |
# Which evaluations are you running? how can people reproduce what you have?
|
|
|
|
| 104 |
|
| 105 |
# What does your leaderboard evaluate?
|
| 106 |
INTRODUCTION_TEXT = """
|
| 107 |
+
We introduced SafeLawBench, a three-tiered safety evaluation benchmark developed from hierarchical clustering of real-world legal materials. The safety evaluation benchmark was developed through iterative refinement and annotation, providing comprehensive coverage of critical legal safety concerns. According to the severity of legal safety, we divided our tasks into four ranks: Critical Personal Safety, Property & Living Security, Fundamental Rights, and Welfare Protection. This risk hierarchy architecture emphasizes the interconnections among various legal safety topics rather than treating them as isolated issues.
|
| 108 |
"""
|
| 109 |
|
| 110 |
# Which evaluations are you running? how can people reproduce what you have?
|