from dataclasses import dataclass, make_dataclass
from enum import Enum

import pandas as pd

from src.about import Tasks


def fields(raw_class):
    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]


# Describes a user-facing column: its display name, type, and visibility behaviour
@dataclass
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False


# Dynamic fields based on new data schema
# metrics = ['precision', 'recall', 'f1_score', 'accuracy', 'yes_percent']
metrics = ["accuracy"]

auto_eval_column_dict = [
    ["model_name", ColumnContent, ColumnContent("Model Name", "markdown", True, never_hidden=True)]
]

# Append metrics as dynamic columns
for metric in metrics:
    auto_eval_column_dict.append(
        [metric, ColumnContent, ColumnContent(metric.capitalize() + " ⬆️", "number", True)]
    )

# Model information remains the same as needed
auto_eval_column_dict += [
    ["model_type", ColumnContent, ColumnContent("Type", "str", False)],
    ["architecture", ColumnContent, ColumnContent("Architecture", "str", False)],
    ["precision", ColumnContent, ColumnContent("Precision", "str", False)],  # Overlaps with the metric name; might need renaming in context
    ["license", ColumnContent, ColumnContent("Hub License", "str", False)],
    ["params", ColumnContent, ColumnContent("#Params (B)", "number", False)],
    ["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)],
    ["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)],
    ["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)],
]

# Make the dataclass for AutoEvalColumn
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)


## For the queue columns in the submission tab
@dataclass(frozen=True)
class EvalQueueColumn:  # Queue column
    model = ColumnContent("model", "markdown", True)
    revision = ColumnContent("revision", "str", True)
    private = ColumnContent("private", "bool", True)
    precision = ColumnContent("precision", "str", True)
    weight_type = ColumnContent("weight_type", "str", "Original")
    status = ColumnContent("status", "str", True)


## All the model information that we might need
@dataclass
class ModelDetails:
    name: str
    display_name: str = ""
    symbol: str = ""  # emoji


class ModelType(Enum):
    PT = ModelDetails(name="pretrained", symbol="🟢")
    FT = ModelDetails(name="fine-tuned", symbol="🔶")
    IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
    RL = ModelDetails(name="RL-tuned", symbol="🟦")
    Unknown = ModelDetails(name="", symbol="?")

    def to_str(self, separator=" "):
        return f"{self.value.symbol}{separator}{self.value.name}"

    @staticmethod
    def from_str(type):
        if "fine-tuned" in type or "🔶" in type:
            return ModelType.FT
        if "pretrained" in type or "🟢" in type:
            return ModelType.PT
        if "RL-tuned" in type or "🟦" in type:
            return ModelType.RL
        if "instruction-tuned" in type or "⭕" in type:
            return ModelType.IFT
        return ModelType.Unknown


class WeightType(Enum):
    Adapter = ModelDetails("Adapter")
    Original = ModelDetails("Original")
    Delta = ModelDetails("Delta")


class Precision(Enum):
    float16 = ModelDetails("float16")
    bfloat16 = ModelDetails("bfloat16")
    float32 = ModelDetails("float32")
    # qt_8bit = ModelDetails("8bit")
    # qt_4bit = ModelDetails("4bit")
    # qt_GPTQ = ModelDetails("GPTQ")
    Unknown = ModelDetails("?")

    @staticmethod
    def from_str(precision):
        if precision in ["torch.float16", "float16"]:
            return Precision.float16
        if precision in ["torch.bfloat16", "bfloat16"]:
            return Precision.bfloat16
        if precision in ["float32"]:
            return Precision.float32
        # if precision in ["8bit"]:
        #     return Precision.qt_8bit
        # if precision in ["4bit"]:
        #     return Precision.qt_4bit
        # if precision in ["GPTQ", "None"]:
        #     return Precision.qt_GPTQ
        return Precision.Unknown


# Column selection
COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
TYPES = [c.type for c in fields(AutoEvalColumn) if not c.hidden]
COLS_LITE = [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]
TYPES_LITE = [c.type for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]

EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]

BENCHMARK_COLS = [t.value.col_name for t in Tasks]

NUMERIC_INTERVALS = {
    "?": pd.Interval(-1, 0, closed="right"),
    "~1.5": pd.Interval(0, 2, closed="right"),
    "~3": pd.Interval(2, 4, closed="right"),
    "~7": pd.Interval(4, 9, closed="right"),
    "~13": pd.Interval(9, 20, closed="right"),
    "~35": pd.Interval(20, 45, closed="right"),
    "~60": pd.Interval(45, 70, closed="right"),
    "70+": pd.Interval(70, 10000, closed="right"),
}
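

# Minimal usage sketch (illustrative only, not part of the leaderboard logic): it assumes
# the module imports cleanly (i.e. `src.about.Tasks` is available) and only exercises
# names defined above. The `n_params` value below is a hypothetical parameter count.
if __name__ == "__main__":
    # Hidden columns are excluded from COLS/TYPES by construction.
    print(COLS)        # e.g. ["Model Name", "Accuracy ⬆️", "Type", ...]
    print(TYPES_LITE)  # display types for the default-visible columns only

    # The enum helpers map display strings back to members.
    assert ModelType.from_str("🔶 fine-tuned") is ModelType.FT
    assert Precision.from_str("torch.bfloat16") is Precision.bfloat16

    # Bucket a parameter count (in billions) into the display intervals.
    n_params = 6.7  # hypothetical #Params (B)
    bucket = next(label for label, interval in NUMERIC_INTERVALS.items() if n_params in interval)
    print(bucket)  # "~7"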