tangtang committed
Commit 03ff9a5 · 1 Parent(s): 6be31de

Update space1

Files changed (2)
  1. app.py +5 -0
  2. src/about.py +0 -19
app.py CHANGED
@@ -93,7 +93,12 @@ demo = gr.Blocks(css=custom_css)
 with demo:
     gr.HTML(TITLE)
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
+    def display_radar_chart():
+        return """
+        <iframe src="https://tangxuemei1995.github.io/LitReview_reusults/clean.html" style="width: 100%; height: 500px; border: none;"></iframe>
+        """
 
+    gr.HTML(display_radar_chart())
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
             leaderboard = init_leaderboard(LEADERBOARD_DF)
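For context, a minimal standalone sketch of the pattern this commit introduces: returning an <iframe> string from a helper and passing it to gr.HTML so an externally hosted chart renders inside the Gradio Blocks layout. Only the iframe URL and inline style are taken from the diff above; the surrounding app (the Blocks wrapper and launch call) is simplified and assumed, not the Space's actual app.py.

import gradio as gr

# URL of the hosted chart page, taken verbatim from the diff above.
RADAR_CHART_URL = "https://tangxuemei1995.github.io/LitReview_reusults/clean.html"

def display_radar_chart() -> str:
    # gr.HTML renders raw HTML, so returning an <iframe> embeds the external page.
    return (
        f'<iframe src="{RADAR_CHART_URL}" '
        'style="width: 100%; height: 500px; border: none;"></iframe>'
    )

with gr.Blocks() as demo:
    gr.HTML(display_radar_chart())

if __name__ == "__main__":
    demo.launch()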
src/about.py CHANGED
@@ -42,25 +42,6 @@ INTRODUCTION_TEXT = """
 This leaderboard evaluates Large Language Models (LLMs) on their ability to perform automated literature review tasks, including reference generation, abstract writing, and review composition.<br>
 It is based on the study: <b>Large Language Models for Automated Literature Review: An Evaluation of Reference Generation, Abstract Writing, and Review Composition.</b><br>
 The leaderboard measures how well different models perform in references generation, factually consistent, and stylistically appropriate academic texts.<br><br>
-
- <div style="display:flex; gap:20px; justify-content:space-between;">
- <div style="text-align:center;">
- <img src="https://huggingface.co/datasets/XuemeiTang/llm_litReview_images/resolve/main/acc_score.png?raw=true" width="200"><br>
- Reference Generation: Precision
- </div>
- <div style="text-align:center;">
- <img src="![Abstract Writing: True](https://huggingface.co/datasets/XuemeiTang/llm_litReview_images/resolve/main/t2_true_entailment.png?raw=true)" width="200"><br>
- Abstract Writing: True
- </div>
- <div style="text-align:center;">
- <img src="https://huggingface.co/datasets/XuemeiTang/llm_litReview_images/resolve/main/acc_score_t3.png?raw=true" width="200"><br>
- Review Composition: Precision
- </div>
- <div style="text-align:center;">
- <img src="https://huggingface.co/datasets/XuemeiTang/llm_litReview_images/resolve/main/kpr_score.png?raw=true" width="200"><br>
- Literature Review Writing: KPR
- </div>
- </div>
 """
 
 