Madhavan Iyengar
committed on
Commit
•
7dada68
1
Parent(s):
b031be2
change displayed metrics
Browse files- src/display/utils.py +24 -25
src/display/utils.py
CHANGED
@@ -8,10 +8,8 @@ from src.about import Tasks
|
|
8 |
def fields(raw_class):
    """Return the values of all non-dunder attributes defined on *raw_class*.

    Keys with two leading OR two trailing underscores (``__module__``,
    ``__doc__``, ...) are excluded; everything else is kept in
    definition order.
    """
    return [
        value
        for key, value in raw_class.__dict__.items()
        if not (key.startswith("__") or key.endswith("__"))
    ]
|
10 |
|
|
|
11 |
|
12 |
-
# These classes are for user facing column names,
|
13 |
-
# to avoid having to change them all around the code
|
14 |
-
# when a modif is needed
|
15 |
@dataclass
|
16 |
class ColumnContent:
|
17 |
name: str
|
@@ -20,28 +18,29 @@ class ColumnContent:
|
|
20 |
hidden: bool = False
|
21 |
never_hidden: bool = False
|
22 |
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
#
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
# Model information
|
34 |
-
auto_eval_column_dict
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
|
|
45 |
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
|
46 |
|
47 |
## For the queue columns in the submission tab
|
|
|
8 |
def fields(raw_class):
    """Collect every non-dunder class attribute value of *raw_class*, in order."""
    collected = []
    for key, value in raw_class.__dict__.items():
        # Skip dunder entries such as __module__ / __qualname__ / __doc__.
        if key[:2] == "__" or key[-2:] == "__":
            continue
        collected.append(value)
    return collected
|
10 |
|
11 |
+
from dataclasses import dataclass, make_dataclass
|
12 |
|
|
|
|
|
|
|
13 |
@dataclass
|
14 |
class ColumnContent:
|
15 |
name: str
|
|
|
18 |
hidden: bool = False
|
19 |
never_hidden: bool = False
|
20 |
|
21 |
+
# Dynamic fields based on new data schema
|
22 |
+
metrics = ['precision', 'recall', 'f1_score', 'accuracy', 'yes_percent']
|
23 |
+
auto_eval_column_dict = [
|
24 |
+
["model_name", ColumnContent, ColumnContent("Model Name", "markdown", True, never_hidden=True)]
|
25 |
+
]
|
26 |
+
|
27 |
+
# Append metrics as dynamic columns
|
28 |
+
for metric in metrics:
|
29 |
+
auto_eval_column_dict.append([metric, ColumnContent, ColumnContent(metric.capitalize() + " ⬆️", "number", True)])
|
30 |
+
|
31 |
+
# Model information remains the same as needed
|
32 |
+
auto_eval_column_dict += [
|
33 |
+
["model_type", ColumnContent, ColumnContent("Type", "str", False)],
|
34 |
+
["architecture", ColumnContent, ColumnContent("Architecture", "str", False)],
|
35 |
+
["precision", ColumnContent, ColumnContent("Precision", "str", False)], # Overlap with new metric name; might need renaming in context
|
36 |
+
["license", ColumnContent, ColumnContent("Hub License", "str", False)],
|
37 |
+
["params", ColumnContent, ColumnContent("#Params (B)", "number", False)],
|
38 |
+
["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)],
|
39 |
+
["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)],
|
40 |
+
["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)]
|
41 |
+
]
|
42 |
+
|
43 |
+
# Make the dataclass for AutoEvalColumn
|
44 |
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
|
45 |
|
46 |
## For the queue columns in the submission tab
|