from dataclasses import dataclass, make_dataclass
from enum import Enum

import pandas as pd

from src.about import Tasks


def fields(raw_class):
    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
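# Behaviour sketch for fields() (illustrative class, not part of this module):
#   class Demo:
#       a = 1
#       b = 2
#   fields(Demo) -> [1, 2]
# Dunder entries such as __module__ are filtered out by the k[:2]/k[-2:] checks.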
# These classes hold the user-facing column names, so that a change
# only has to be made here rather than throughout the code.
@dataclass
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False
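# Illustrative example (hypothetical column, not one defined below): a header
# that reads "Accuracy", rendered as a number and shown by default:
#   acc = ColumnContent("Accuracy", "number", True)
#   acc.name                  -> "Accuracy"
#   acc.displayed_by_default  -> True
#   acc.hidden                -> False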
## Leaderboard columns
auto_eval_column_dict = []
# Init
#auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "markdown", True, never_hidden=True)])
auto_eval_column_dict.append(["model_name", ColumnContent, ColumnContent("Model Name", "markdown", True, never_hidden=True)])
auto_eval_column_dict.append(["paper", ColumnContent, ColumnContent("Paper", "markdown", False)])
auto_eval_column_dict.append(["training_dataset_type", ColumnContent, ColumnContent("Training Dataset Type", "markdown", False, hidden=True)])
auto_eval_column_dict.append(["training_dataset", ColumnContent, ColumnContent("Training Dataset", "markdown", True, never_hidden=True)])
# Scores
for task in Tasks:
    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
# Model information
#auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "markdown", False)])
auto_eval_column_dict.append(["model_backbone_type", ColumnContent, ColumnContent("Model Backbone Type", "markdown", False, hidden=True)])
auto_eval_column_dict.append(["model_backbone", ColumnContent, ColumnContent("Model Backbone", "str", True)])
auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "markdown", False)])
auto_eval_column_dict.append(["model_parameters", ColumnContent, ColumnContent("Parameter Count", "markdown", False)])
auto_eval_column_dict.append(["model_link", ColumnContent, ColumnContent("Link To Model", "markdown", True)])
auto_eval_column_dict.append(["testing_type", ColumnContent, ColumnContent("Testing Type", "str", False, hidden=True)])

# We use make_dataclass to dynamically fill in the per-task score columns from Tasks
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
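# For illustration: the generated class behaves as if written by hand as a
# frozen dataclass with one ColumnContent default per entry above (a sketch;
# "example_task" stands in for whatever fields src.about.Tasks contributes):
#
#   @dataclass(frozen=True)
#   class AutoEvalColumn:
#       model_name: ColumnContent = ColumnContent("Model Name", "markdown", True, never_hidden=True)
#       paper: ColumnContent = ColumnContent("Paper", "markdown", False)
#       example_task: ColumnContent = ColumnContent("Example Task", "number", True)
#       ...
#
# Call sites then read headers as, e.g., AutoEvalColumn.model_name.name.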
## For the queue columns in the submission tab
class EvalQueueColumn:  # Queue column
    model = ColumnContent("model", "str", True)
    precision = ColumnContent("precision", "str", True)
    training_dataset = ColumnContent("training_dataset", "str", True)
    testing_type = ColumnContent("testing_type", "str", True)
    status = ColumnContent("status", "str", True)
## All the model information that we might need
@dataclass
class ModelDetails:
    name: str
    display_name: str = ""
    symbol: str = ""  # emoji
class ModelType(Enum):
    PT = ModelDetails(name="pretrained", symbol="🟢")
    FT = ModelDetails(name="fine-tuned", symbol="🔶")
    IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
    RL = ModelDetails(name="RL-tuned", symbol="🟦")
    Other = ModelDetails(name="Other", symbol="?")

    def to_str(self, separator=" "):
        return f"{self.value.symbol}{separator}{self.value.name}"

    @staticmethod
    def from_str(type):
        if "fine-tuned" in type or "🔶" in type:
            return ModelType.FT
        if "pretrained" in type or "🟢" in type:
            return ModelType.PT
        if "RL-tuned" in type or "🟦" in type:
            return ModelType.RL
        if "instruction-tuned" in type or "⭕" in type:
            return ModelType.IFT
        return ModelType.Other
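# Round-trip sketch (follows directly from the definitions above):
#   ModelType.FT.to_str()               -> "🔶 fine-tuned"
#   ModelType.from_str("🔶 fine-tuned") -> ModelType.FT
#   ModelType.from_str("mystery")       -> ModelType.Other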
class Precision(Enum):
    float32 = "float32"
    Other = "Other"

    @staticmethod
    def from_str(precision):
        if precision in ["torch.float32", "float32"]:
            return Precision.float32
        return Precision.Other
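# Usage sketch: normalising a precision string (e.g. one read from a request
# file -- the exact source is an assumption, not shown in this module):
#   Precision.from_str("torch.float32") -> Precision.float32
#   Precision.from_str("bfloat16")      -> Precision.Other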
# Column selection
COLS = [c.name for c in fields(AutoEvalColumn)]
EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
BENCHMARK_COLS = [t.value.col_name for t in Tasks]
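
# Minimal smoke check (a sketch; assumes src.about is importable, e.g. when
# running this module from the repository root):
if __name__ == "__main__":
    print("Leaderboard columns:", COLS)
    print("Queue columns:", EVAL_COLS)
    print("Queue column types:", EVAL_TYPES)
    print("Benchmark columns:", BENCHMARK_COLS)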