Madhavan Iyengar committed on
Commit
7dada68
1 Parent(s): b031be2

change displayed metrics

Browse files
Files changed (1) hide show
  1. src/display/utils.py +24 -25
src/display/utils.py CHANGED
@@ -8,10 +8,8 @@ from src.about import Tasks
8
  def fields(raw_class):
9
  return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
10
 
 
11
 
12
- # These classes are for user facing column names,
13
- # to avoid having to change them all around the code
14
- # when a modif is needed
15
  @dataclass
16
  class ColumnContent:
17
  name: str
@@ -20,28 +18,29 @@ class ColumnContent:
20
  hidden: bool = False
21
  never_hidden: bool = False
22
 
23
- ## Leaderboard columns
24
- auto_eval_column_dict = []
25
- # Init
26
- # auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
27
- auto_eval_column_dict.append(["model_name", ColumnContent, ColumnContent("model_name", "markdown", True, never_hidden=True)])
28
- #Scores
29
- # auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
30
- auto_eval_column_dict.append(["accuracy", ColumnContent, ColumnContent("accuracy", "number", True)])
31
- for task in Tasks:
32
- auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
33
- # Model information
34
- auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
35
- auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
36
- auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
37
- auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
38
- auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
39
- auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
40
- auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
41
- auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
42
- auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
43
-
44
- # We use make dataclass to dynamically fill the scores from Tasks
 
45
  AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
46
 
47
  ## For the queue columns in the submission tab
 
8
def fields(raw_class):
    """Return the values of every attribute declared directly on *raw_class*,
    skipping dunder names such as ``__module__`` and ``__doc__``.

    Used to enumerate the ColumnContent specs attached to the dynamically
    built column dataclasses.
    """
    values = []
    for attr_name, attr_value in raw_class.__dict__.items():
        # Exclude any name with a leading OR trailing double underscore,
        # mirroring the k[:2] != "__" and k[-2:] != "__" filter.
        if attr_name.startswith("__") or attr_name.endswith("__"):
            continue
        values.append(attr_value)
    return values
10
 
11
+ from dataclasses import dataclass, make_dataclass
12
 
 
 
 
13
  @dataclass
14
  class ColumnContent:
15
  name: str
 
18
  hidden: bool = False
19
  never_hidden: bool = False
20
 
21
# Dynamic fields based on the new data schema.
# NOTE: each key below becomes a field name on the generated AutoEvalColumn
# dataclass, so keys must be unique across the whole list —
# dataclasses.make_dataclass raises TypeError on duplicated field names.
metrics = ['precision', 'recall', 'f1_score', 'accuracy', 'yes_percent']

auto_eval_column_dict = [
    # Model name column: always visible, rendered as markdown (links).
    ["model_name", ColumnContent, ColumnContent("Model Name", "markdown", True, never_hidden=True)]
]

# Append one displayed numeric column per metric.
for metric in metrics:
    auto_eval_column_dict.append(
        [metric, ColumnContent, ColumnContent(metric.capitalize() + " ⬆️", "number", True)]
    )

# Model information columns (not displayed by default).
auto_eval_column_dict += [
    ["model_type", ColumnContent, ColumnContent("Type", "str", False)],
    ["architecture", ColumnContent, ColumnContent("Architecture", "str", False)],
    # Key renamed from "precision": it collided with the "precision" metric
    # column above, and make_dataclass rejects duplicate field names, which
    # made this module fail at import time. Display label is unchanged.
    ["model_precision", ColumnContent, ColumnContent("Precision", "str", False)],
    ["license", ColumnContent, ColumnContent("Hub License", "str", False)],
    ["params", ColumnContent, ColumnContent("#Params (B)", "number", False)],
    ["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)],
    ["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)],
    ["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)],
]

# Build the frozen AutoEvalColumn dataclass from the accumulated
# [field_name, field_type, default ColumnContent] triples.
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
45
 
46
  ## For the queue columns in the submission tab