from dataclasses import dataclass, field, make_dataclass
from enum import Enum

from src.about import Tasks
| """ adapted from original template, where unnecessary code was removed | |
| util.py is used for defining our fixed columns, which will be referenced to from app.py | |
| ColumnContent dataclass used to define column properties""" | |

def fields(raw_class):
    # Return every non-dunder attribute; works on both classes (EvalQueueColumn)
    # and instances (the AutoEvalColumn instance below).
    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
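
# A sketch of the intended use (EvalQueueColumn and AutoEvalColumn are defined
# further down in this module):
#     fields(EvalQueueColumn)                    -> [ColumnContent("model", ...), ...]
#     [c.name for c in fields(EvalQueueColumn)]  -> ["model", "revision", ...]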

@dataclass
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False

## Leaderboard columns
auto_eval_column_dict = []
# Init
auto_eval_column_dict.append(["technique", ColumnContent, field(default_factory=lambda: ColumnContent("Technique", "str", True, never_hidden=True))])
# Metric scores
for task in Tasks:
    _task = task  # capture loop variable
    auto_eval_column_dict.append([task.name, ColumnContent, field(default_factory=lambda t=_task: ColumnContent(t.value.col_name, "number", True))])

# We use make_dataclass to dynamically fill the scores from Tasks, then
# instantiate the result so fields() can read the ColumnContent defaults
# from the instance's __dict__.
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)()
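
# The instance exposes one ColumnContent per column, e.g. (a sketch, assuming
# Tasks has a member named "task0" whose col_name is "Accuracy"):
#     AutoEvalColumn.technique.name  -> "Technique"
#     AutoEvalColumn.task0.name      -> "Accuracy"
#     AutoEvalColumn.task0.type      -> "number"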

## For the queue columns in the submission tab
class EvalQueueColumn:  # Queue column
    model = ColumnContent("model", "markdown", True)
    revision = ColumnContent("revision", "str", True)
    private = ColumnContent("private", "bool", True)
    precision = ColumnContent("precision", "str", True)
    weight_type = ColumnContent("weight_type", "str", True)
    status = ColumnContent("status", "str", True)
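
# These class attributes are typically flattened into header/type lists for
# the queue tables, e.g. (a sketch, not necessarily the exact app.py code):
#     EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
#     EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]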

## All the model information that we might need
@dataclass
class ModelDetails:
    name: str
    display_name: str = ""
    symbol: str = ""  # emoji

class ModelType(Enum):
    PT = ModelDetails(name="pretrained", symbol="🟢")
    FT = ModelDetails(name="fine-tuned", symbol="🔶")
    IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
    RL = ModelDetails(name="RL-tuned", symbol="🟦")
    Unknown = ModelDetails(name="", symbol="?")

    def to_str(self, separator=" "):
        return f"{self.value.symbol}{separator}{self.value.name}"
    @staticmethod
    def from_str(type):
        if "fine-tuned" in type or "🔶" in type:
            return ModelType.FT
        if "pretrained" in type or "🟢" in type:
            return ModelType.PT
        if "RL-tuned" in type or "🟦" in type:
            return ModelType.RL
        if "instruction-tuned" in type or "⭕" in type:
            return ModelType.IFT
        return ModelType.Unknown
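
# Round-trip sketch: parse a free-form type string, then render it for display.
#     ModelType.from_str("fine-tuned 🔶")  -> ModelType.FT
#     ModelType.FT.to_str()                -> "🔶 fine-tuned"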

class WeightType(Enum):
    Adapter = ModelDetails("Adapter")
    Original = ModelDetails("Original")
    Delta = ModelDetails("Delta")

class Precision(Enum):
    float16 = ModelDetails("float16")
    bfloat16 = ModelDetails("bfloat16")
    Unknown = ModelDetails("?")
    @staticmethod
    def from_str(precision):
        if precision in ["torch.float16", "float16"]:
            return Precision.float16
        if precision in ["torch.bfloat16", "bfloat16"]:
            return Precision.bfloat16
        return Precision.Unknown
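
# from_str accepts both torch dtype strings and bare names; anything else
# maps to Unknown:
#     Precision.from_str("torch.float16")  -> Precision.float16
#     Precision.from_str("int8")           -> Precision.Unknown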

# Column selection
COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
BENCHMARK_COLS = []
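
# Minimal smoke test (a sketch; runs only when this file is executed directly
# and src.about is importable):
if __name__ == "__main__":
    print(COLS)                                       # visible leaderboard columns
    print(ModelType.from_str("fine-tuned").to_str())  # 🔶 fine-tuned
    print(Precision.from_str("torch.bfloat16").name)  # bfloat16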