from dataclasses import dataclass, field, make_dataclass
from enum import Enum

from src.about import Tasks


def fields(raw_class):
    """Return the non-dunder class attributes of raw_class (here, ColumnContent instances)."""
    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]


# These classes hold the user-facing column names, so that a rename
# only has to happen here rather than everywhere the columns are used.
@dataclass
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False

## Leaderboard columns
auto_eval_column_dict = []  # a list of make_dataclass field specs, despite the name


def column_field(name: str, type: str, displayed_by_default: bool, hidden: bool = False, never_hidden: bool = False):
    """Build the (type, Field) pair for one ColumnContent entry in the make_dataclass spec.

    Binding the values as function parameters (rather than closing over loop
    variables with a bare lambda) avoids Python's late-binding closure pitfall
    in the Tasks loop below.
    """
    return (
        ColumnContent,
        field(
            default_factory=lambda: ColumnContent(
                name,
                type,
                displayed_by_default,
                hidden,
                never_hidden,
            )
        ),
    )
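
# For reference, ["model", *column_field("Model", "markdown", True)] expands to
# the [name, type, field(...)] triple that make_dataclass expects for one field spec.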


# Init: model identity columns, always shown
auto_eval_column_dict.append(["model_type_symbol", *column_field("T", "str", True, never_hidden=True)])
auto_eval_column_dict.append(["model", *column_field("Model", "markdown", True, never_hidden=True)])
auto_eval_column_dict.append(["average", *column_field("Average ⬆️", "number", True)])
for task in Tasks:
    auto_eval_column_dict.append([task.name, *column_field(task.value.col_name, "number", True)])
auto_eval_column_dict.append(["model_type", *column_field("Type", "str", False)])
auto_eval_column_dict.append(["architecture", *column_field("Architecture", "str", False)])
auto_eval_column_dict.append(["weight_type", *column_field("Weight type", "str", False, True)])
auto_eval_column_dict.append(["precision", *column_field("Precision", "str", False)])
auto_eval_column_dict.append(["license", *column_field("Hub License", "str", False)])
auto_eval_column_dict.append(["params", *column_field("#Params (B)", "number", False)])
auto_eval_column_dict.append(["likes", *column_field("Hub ❤️", "number", False)])
auto_eval_column_dict.append(["still_on_hub", *column_field("Available on the hub", "bool", False)])
auto_eval_column_dict.append(["revision", *column_field("Model sha", "str", False, False)])

# We use make_dataclass to build AutoEvalColumn dynamically, so the score
# columns always follow whatever is defined in Tasks.
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
# make_dataclass does not set class attributes for default_factory fields,
# so expose each ColumnContent instance on the class explicitly.
for field_name, _, field_def in auto_eval_column_dict:
    setattr(AutoEvalColumn, field_name, field_def.default_factory())
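
# After the loop above, each column is reachable as a plain class attribute,
# e.g. AutoEvalColumn.model.name == "Model" and AutoEvalColumn.average.type == "number".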

## For the queue columns in the submission tab
@dataclass(frozen=True)
class EvalQueueColumn:
    # Plain class attributes without type annotations: the dataclass machinery
    # ignores them, and they are read back through the fields() helper above.
    model = ColumnContent("model", "markdown", True)
    revision = ColumnContent("revision", "str", True)
    precision = ColumnContent("precision", "str", True)
    submitted_time = ColumnContent("submitted_time", "str", True)
    status = ColumnContent("status", "str", True)
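
# fields(EvalQueueColumn) yields the five ColumnContent instances above (class
# __dict__ preserves definition order), so EVAL_COLS at the bottom of this file
# comes out as ["model", "revision", "precision", "submitted_time", "status"].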

## All the model information that we might need
@dataclass
class ModelDetails:
    name: str
    display_name: str = ""
    symbol: str = ""  # short symbol shown next to the model name (a letter or emoji)


class ModelType(Enum):
    PT = ModelDetails(name="pretrained", symbol="P")
    FT = ModelDetails(name="fine-tuned", symbol="F")
    IFT = ModelDetails(name="instruction-tuned", symbol="I")
    RL = ModelDetails(name="RL-tuned", symbol="R")
    Unknown = ModelDetails(name="", symbol="?")

    def to_str(self, separator=" "):
        return f"{self.value.symbol}{separator}{self.value.name}"

    @staticmethod
    def from_str(model_type: str):
        # Match on the type name; the emoji checks keep compatibility with
        # leaderboard variants that use emoji symbols instead of letters.
        if "fine-tuned" in model_type or "🔶" in model_type:
            return ModelType.FT
        if "pretrained" in model_type or "🟢" in model_type:
            return ModelType.PT
        if "RL-tuned" in model_type or "🟦" in model_type:
            return ModelType.RL
        if "instruction-tuned" in model_type or "⭕" in model_type:
            return ModelType.IFT
        return ModelType.Unknown
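
# Round trip, for reference: ModelType.FT.to_str() == "F fine-tuned", and
# ModelType.from_str("F fine-tuned") is ModelType.FT again.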

class WeightType(Enum):
    Adapter = ModelDetails("Adapter")
    Original = ModelDetails("Original")
    Delta = ModelDetails("Delta")

class Precision(Enum):
    float16 = ModelDetails("float16")
    bfloat16 = ModelDetails("bfloat16")
    Unknown = ModelDetails("?")

    @staticmethod
    def from_str(precision):
        if precision in ["torch.float16", "float16"]:
            return Precision.float16
        if precision in ["torch.bfloat16", "bfloat16"]:
            return Precision.bfloat16
        return Precision.Unknown
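
# e.g. Precision.from_str("torch.bfloat16") is Precision.bfloat16; any other
# string (float32, int8, ...) falls through to Precision.Unknown.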

# Column selection
COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]

EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]

BENCHMARK_COLS = [t.value.col_name for t in Tasks]
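

if __name__ == "__main__":
    # Minimal smoke test; a sketch that assumes the repository root is on
    # sys.path so that src.about imports cleanly.
    assert AutoEvalColumn.model.name == "Model"
    assert "Weight type" not in COLS  # hidden columns are filtered out of COLS
    assert ModelType.from_str("pretrained") is ModelType.PT
    assert Precision.from_str("float16") is Precision.float16
    print(f"{len(COLS)} display columns, {len(BENCHMARK_COLS)} benchmark columns")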