from dataclasses import dataclass, make_dataclass, field
from enum import Enum
from src.about import Tasks

def fields(raw_class):
    """Return the non-dunder attribute values of a class or instance (here, its ColumnContent entries)."""
    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]

# These classes are for user-facing column names,
# to avoid having to change them all around the code
# when a modification is needed
@dataclass
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False

## Leaderboard columns
auto_eval_column_dict = []
# Init
auto_eval_column_dict.append(["model_type_symbol", ColumnContent, field(default_factory=lambda: ColumnContent("T", "str", True, never_hidden=True))])
auto_eval_column_dict.append(["model", ColumnContent, field(default_factory=lambda: ColumnContent("Model", "markdown", True, never_hidden=True))])
# Scores
auto_eval_column_dict.append(["average", ColumnContent, field(default_factory=lambda: ColumnContent("Average ⬆️", "number", True))])
for task in Tasks:
    auto_eval_column_dict.append([task.name, ColumnContent, field(default_factory=lambda task=task: ColumnContent(task.value.col_name, "number", True))])
# Model information
auto_eval_column_dict.append(["model_type", ColumnContent, field(default_factory=lambda: ColumnContent("Type", "str", False))])
auto_eval_column_dict.append(["architecture", ColumnContent, field(default_factory=lambda: ColumnContent("Architecture", "str", False))])
auto_eval_column_dict.append(["weight_type", ColumnContent, field(default_factory=lambda: ColumnContent("Weight type", "str", False, True))])
auto_eval_column_dict.append(["precision", ColumnContent, field(default_factory=lambda: ColumnContent("Precision", "str", False))])
auto_eval_column_dict.append(["license", ColumnContent, field(default_factory=lambda: ColumnContent("Hub License", "str", False))])
auto_eval_column_dict.append(["params", ColumnContent, field(default_factory=lambda: ColumnContent("#Params (B)", "number", False))])
auto_eval_column_dict.append(["likes", ColumnContent, field(default_factory=lambda: ColumnContent("Hub ❤️", "number", False))])
auto_eval_column_dict.append(["still_on_hub", ColumnContent, field(default_factory=lambda: ColumnContent("Available on the hub", "bool", False))])
auto_eval_column_dict.append(["revision", ColumnContent, field(default_factory=lambda: ColumnContent("Model sha", "str", False, False))])
# We use make_dataclass to dynamically fill the scores from Tasks
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
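# The generated class behaves roughly like a frozen dataclass with one ColumnContent
# field per entry appended above (illustrative sketch only, not executed):
#
#   AutoEvalColumn().model        -> ColumnContent("Model", "markdown", True, never_hidden=True)
#   AutoEvalColumn().average      -> ColumnContent("Average ⬆️", "number", True)
#   AutoEvalColumn().<task.name>  -> ColumnContent(task.value.col_name, "number", True)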
## For the queue columns in the submission tab
@dataclass(frozen=True)
class EvalQueueColumn:  # Queue column
    model = ColumnContent("model", "markdown", True)
    revision = ColumnContent("revision", "str", True)
    private = ColumnContent("private", "bool", True)
    precision = ColumnContent("precision", "str", True)
    weight_type = ColumnContent("weight_type", "str", True)
    status = ColumnContent("status", "str", True)

## All the model information that we might need
@dataclass
class ModelDetails:
    name: str
    display_name: str = ""
    symbol: str = ""  # emoji

class ModelType(Enum):
    PT = ModelDetails(name="pretrained", symbol="🟢")
    FT = ModelDetails(name="fine-tuned", symbol="🔶")
    IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
    RL = ModelDetails(name="RL-tuned", symbol="🟦")
    Unknown = ModelDetails(name="", symbol="?")

    def to_str(self, separator=" "):
        return f"{self.value.symbol}{separator}{self.value.name}"

    @staticmethod
    def from_str(type):
        if "fine-tuned" in type or "🔶" in type:
            return ModelType.FT
        if "pretrained" in type or "🟢" in type:
            return ModelType.PT
        if "RL-tuned" in type or "🟦" in type:
            return ModelType.RL
        if "instruction-tuned" in type or "⭕" in type:
            return ModelType.IFT
        return ModelType.Unknown
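
# Usage sketch, given the enum values defined above:
#   ModelType.from_str("🔶 fine-tuned")   -> ModelType.FT
#   ModelType.FT.to_str()                 -> "🔶 fine-tuned"
#   ModelType.from_str("something else")  -> ModelType.Unknown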

class WeightType(Enum):
    Adapter = ModelDetails("Adapter")
    Original = ModelDetails("Original")
    Delta = ModelDetails("Delta")

    @staticmethod
    def from_str(weight_type):
        """Convert string representation to WeightType enum value.

        Args:
            weight_type (str): The string representation of the weight type

        Returns:
            WeightType: The corresponding enum value

        Raises:
            ValueError: If the weight type is not recognized
        """
        weight_type = str(weight_type).lower()
        if weight_type == "adapter":
            return WeightType.Adapter
        elif weight_type == "original":
            return WeightType.Original
        elif weight_type == "delta":
            return WeightType.Delta
        raise ValueError(f"Unknown weight type: {weight_type}")

class Precision(Enum):
    float16 = ModelDetails("float16")
    bfloat16 = ModelDetails("bfloat16")
    Unknown = ModelDetails("?")

    @staticmethod
    def from_str(precision):
        if precision in ["torch.float16", "float16"]:
            return Precision.float16
        if precision in ["torch.bfloat16", "bfloat16"]:
            return Precision.bfloat16
        return Precision.Unknown
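
# Usage sketch for the two string-parsing helpers above:
#   Precision.from_str("torch.bfloat16")  -> Precision.bfloat16
#   Precision.from_str("int8")            -> Precision.Unknown
#   WeightType.from_str("Adapter")        -> WeightType.Adapter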
# Column selection
COLS = [c.name for c in fields(AutoEvalColumn()) if not c.hidden]
EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
BENCHMARK_COLS = [t.value.col_name for t in Tasks]
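
# Minimal sanity check of the derived column lists; a sketch only, runnable as
# `python -m src.display.utils` (that module path is an assumption based on the
# `src.about` import at the top of this file).
if __name__ == "__main__":
    print("COLS:", COLS)
    print("EVAL_COLS:", EVAL_COLS)
    print("EVAL_TYPES:", EVAL_TYPES)
    print("BENCHMARK_COLS:", BENCHMARK_COLS)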