|
|
from dataclasses import dataclass |
|
|
from enum import Enum |
|
|
|
|
|
@dataclass
class Task:
    """One benchmark entry shown on the leaderboard.

    Maps a benchmark's metric key (as produced by the eval harness)
    to the column name displayed in the results table.
    """

    benchmark: str  # harness identifier of the benchmark, e.g. "anli_r1"
    metric: str  # metric key to read from the result files, e.g. "acc"
    col_name: str  # human-readable column header, e.g. "ANLI"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class Tasks(Enum):
    """Registry of benchmarks evaluated for the leaderboard.

    Each member wraps a :class:`Task` describing where the score comes
    from (benchmark + metric) and which column it fills in the table.
    """

    # Keyword arguments spell out how each Task field is used.
    task0 = Task(benchmark="anli_r1", metric="acc", col_name="ANLI")
    task1 = Task(benchmark="logiqa", metric="acc_norm", col_name="LogiQA")
|
|
|
|
|
NUM_FEWSHOT = 0  # few-shot example count; not used in this view — confirm against the eval harness config
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Page title, rendered as raw HTML at the top of the leaderboard page.
TITLE = """<h1 align="center" id="space-title">Eval-Anything Leaderboard</h1>"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Markdown introduction describing the Eval-Anything framework (AMU/AMG tasks).
# The (†) footnote explains the agent-based setup for models without native
# all-modality output. Blank lines are intentional markdown paragraph breaks.
INTRODUCTION_TEXT = """


Eval-anything is a framework designed specifically for evaluating all-modality models, and it is a part of the [Align-Anything](https://github.com/PKU-Alignment/align-anything) framework. It consists of two main tasks: All-Modality Understanding (AMU) and All-Modality Generation (AMG). AMU assesses a model's ability to simultaneously process and integrate information from all modalities, including text, images, audio, and video. On the other hand, AMG evaluates a model's capability to autonomously select output modalities based on user instructions and synergistically utilize different modalities to generate output. Eval-anything aims to comprehensively assess the ability of all-modality models to handle heterogeneous data from multiple sources, providing a reliable evaluation tool for this field.





**Note:** Since most current open-source models lack support for all-modality output, (†) indicates that models are used as agents to invoke [AudioLDM2-Large](https://huggingface.co/cvssp/audioldm2-large) and [FLUX.1-schnell](https://huggingface.co/black-forest-labs/FLUX.1-schnell) for audio and image generation.


"""
|
|
|
|
|
|
|
|
# Markdown body for the benchmarks/about section; currently intentionally empty.
# Fix: dropped the redundant f-string prefix (no placeholders — ruff F541);
# a stray "{...}" added later to a plain string can no longer raise at import.
LLM_BENCHMARKS_TEXT = """


"""
|
|
|
|
|
# Text for the submission/queue tab — presumably rendered by the leaderboard UI;
# intentionally left blank for now.
EVALUATION_QUEUE_TEXT = """


"""
|
|
|
|
|
# Label shown above the citation snippet box in the UI.
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"

# BibTeX entry for the Align-Anything project. The double backslash renders as
# a literal \url{...} in the copied snippet.
CITATION_BUTTON_TEXT = """


@misc{align_anything,


author = {PKU-Alignment Team},


title = {Align Anything: training all modality models to follow instructions with unified language feedback},


year = {2024},


publisher = {GitHub},


journal = {GitHub repository},


howpublished = {\\url{https://github.com/PKU-Alignment/align-anything}},


}


"""
|
|
|
|
|
|
|
|
# Short note for the about section; placeholder until result-upload docs exist.
ABOUT_TEXT = """


We will provide methods to upload more model evaluation results in the future.


"""