Update app.py
app.py CHANGED

```diff
@@ -65,38 +65,59 @@ def main():
     st.markdown("Leaderboard made with [🧐 LLM AutoEval](https://github.com/mlabonne/llm-autoeval) using [Nous](https://huggingface.co/NousResearch) benchmark suite. It's a collection of my own evaluations.")
 
     content = create_yall()
-    [32 deleted lines not shown in this view]
+    tab1, tab2 = st.tabs(["🏆 Leaderboard", "📝 About"])
+
+    # Leaderboard tab
+    with tab1:
+        if content:
+            try:
+                score_columns = ['Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench']
+
+                # Display dataframe
+                df = convert_markdown_table_to_dataframe(content)
+                for col in score_columns:
+                    df[col] = pd.to_numeric(df[col].str.strip(), errors='coerce')
+                st.dataframe(df, use_container_width=True)
+
+                # Full-width plot for the first category
+                create_bar_chart(df, score_columns[0])
+
+                # Next two plots in two columns
+                col1, col2 = st.columns(2)
+                with col1:
+                    create_bar_chart(df, score_columns[1])
+                with col2:
+                    create_bar_chart(df, score_columns[2])
+
+                # Last two plots in two columns
+                col3, col4 = st.columns(2)
+                with col3:
+                    create_bar_chart(df, score_columns[3])
+                with col4:
+                    create_bar_chart(df, score_columns[4])
+
+            except Exception as e:
+                st.error("An error occurred while processing the markdown table.")
+                st.error(str(e))
+        else:
+            st.error("Failed to download the content from the URL provided.")
+
+    # About tab
+    with tab2:
+        st.markdown('''
+        ## Nous benchmark suite
+
+        Popularized by [Teknium](https://huggingface.co/teknium) and [NousResearch](https://huggingface.co/NousResearch), this benchmark suite aggregates four benchmarks:
+
+        * [**AGIEval**](https://arxiv.org/abs/2304.06364) (0-shot): `agieval_aqua_rat,agieval_logiqa_en,agieval_lsat_ar,agieval_lsat_lr,agieval_lsat_rc,agieval_sat_en,agieval_sat_en_without_passage,agieval_sat_math`
+        * **GPT4ALL** (0-shot): `hellaswag,openbookqa,winogrande,arc_easy,arc_challenge,boolq,piqa`
+        * [**TruthfulQA**](https://arxiv.org/abs/2109.07958) (0-shot): `truthfulqa_mc`
+        * [**Bigbench**](https://arxiv.org/abs/2206.04615) (0-shot): `bigbench_causal_judgement,bigbench_date_understanding,bigbench_disambiguation_qa,bigbench_geometric_shapes,bigbench_logical_deduction_five_objects,bigbench_logical_deduction_seven_objects,bigbench_logical_deduction_three_objects,bigbench_movie_recommendation,bigbench_navigate,bigbench_reasoning_about_colored_objects,bigbench_ruin_names,bigbench_salient_translation_error_detection,bigbench_snarks,bigbench_sports_understanding,bigbench_temporal_sequences,bigbench_tracking_shuffled_objects_five_objects,bigbench_tracking_shuffled_objects_seven_objects,bigbench_tracking_shuffled_objects_three_objects`
+
+        ## Reproducibility
+
+        You can easily reproduce these results using [🧐 LLM AutoEval](https://github.com/mlabonne/llm-autoeval/tree/master), a colab notebook that automates the evaluation process (benchmark: `nous`). This will upload the results to GitHub as gists. You can find the entire table with the links to the detailed results [here](https://gist.github.com/mlabonne/90294929a2dbcb8877f9696f28105fdf).
+        ''')
 
 if __name__ == "__main__":
     main()
```
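
Two helpers the new code calls, `convert_markdown_table_to_dataframe` and `create_bar_chart`, are defined elsewhere in app.py and are not part of this diff. For readers following along, here is a minimal sketch of what the first could look like, assuming `content` is a single GitHub-flavored markdown table with a header row and a `|---|` separator; only the function name and call site come from the diff, the body is an illustration:

```python
import io
import re

import pandas as pd


def convert_markdown_table_to_dataframe(markdown: str) -> pd.DataFrame:
    # Keep only the lines that look like table rows ("| ... |").
    rows = [line.strip() for line in markdown.splitlines() if line.strip().startswith("|")]
    # Drop the header/body separator row ("|---|---|" or "| :-: | :-: |").
    rows = [row for row in rows if not re.fullmatch(r"\|[\s:|-]+\|", row)]
    # Split on "|" and keep every cell as a string: main() strips and coerces
    # the score columns itself with .str.strip() + pd.to_numeric(errors='coerce').
    df = pd.read_csv(io.StringIO("\n".join(rows)), sep="|", engine="python", dtype=str)
    # The leading and trailing pipes produce empty edge columns; drop them.
    df = df.iloc[:, 1:-1]
    df.columns = [column.strip() for column in df.columns]
    return df
```

A parser like this leaves padding whitespace in the cell values, which is consistent with the diff calling `.str.strip()` before `pd.to_numeric` on each score column.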
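`create_bar_chart` can be sketched the same way. The version below assumes the table has a `Model` column (an assumption, not something the diff shows) and uses Streamlit's built-in `st.bar_chart`; the actual app may render with Plotly or Altair instead:

```python
import pandas as pd
import streamlit as st


def create_bar_chart(df: pd.DataFrame, column: str) -> None:
    # Sort descending so the best-scoring model reads first.
    scores = (
        df[["Model", column]]
        .dropna(subset=[column])
        .sort_values(by=column, ascending=False)
        .set_index("Model")[column]
    )
    st.write(f"### {column} scores")
    st.bar_chart(scores)
```

By the time these charts run, `main()` has already coerced the score columns to numeric, so sorting and plotting work directly on floats.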
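Finally, the `else` branch's message ("Failed to download the content from the URL provided.") suggests `create_yall()` is a plain HTTP download that returns falsy content on failure. A speculative sketch, reusing the gist URL from the About tab as the source; the real helper may instead aggregate several per-model result gists:

```python
import requests


def create_yall() -> str | None:
    # Raw view of the results gist linked in the About tab (assumed source).
    url = "https://gist.githubusercontent.com/mlabonne/90294929a2dbcb8877f9696f28105fdf/raw"
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()
    except requests.RequestException:
        return None  # main() surfaces this via st.error() in the else branch
    return response.text
```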