eduagarcia committed on
Commit
67c432c
1 Parent(s): 49da771

fix ModelType is not defined
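For context: Python executes a module's top-level statements in order, so a name referenced before the statement that defines it raises NameError at import time. Here the external-models block called ModelType.from_str(...) above the point in src/display/utils.py where the ModelType enum is defined, so importing the module failed; the commit moves the block below the enum and Language definitions. A minimal sketch of the failure mode, using a stand-in enum rather than the project's real ModelType:

from enum import Enum

try:
    # At module import time the class statement below has not run yet,
    # so this lookup fails exactly like the leaderboard code did.
    _ = ModelType  # noqa: F821 -- deliberately referenced too early
except NameError as err:
    print(err)  # name 'ModelType' is not defined

class ModelType(Enum):
    # Stand-in member; the real enum in src/display/utils.py has more.
    proprietary = "proprietary"

# Once the definition has executed, the same reference succeeds,
# which is all that moving the block further down the module changes.
print(ModelType.proprietary.value)  # prints: proprietary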

Files changed (1)
  1. src/display/utils.py +25 -25
src/display/utils.py CHANGED
@@ -166,31 +166,6 @@ human_baseline_row[AutoEvalColumn.npm.name] = round(sum(npm) / len(npm), 2)
 if GET_ORIGINAL_HF_LEADERBOARD_EVAL_RESULTS:
     human_baseline_row["🤗 Leaderboard Average"] = None
 
-#External models
-external_rows = []
-if os.path.exists('external_models_results.json'):
-    with open('external_models_results.json', 'r', encoding='utf8') as f:
-        all_models = json.load(f)
-    for model_data in all_models:
-        model_row = deepcopy(baseline_row)
-        model_row[AutoEvalColumn.model.name] = f'<a target="_blank" href="{model_data["link"]}" style="color: var(--text-color); text-decoration: underline;text-decoration-style: dotted;">{model_data["name"]} [{model_data["date"]}]</a>'
-        model_row[AutoEvalColumn.dummy.name] = model_data['model']
-        for task in Tasks:
-            model_row[task.value.col_name] = round(model_data['result_metrics'][task.value.benchmark]*100, 2)
-        model_row[AutoEvalColumn.average.name] = round(model_data['result_metrics_average']*100, 2)
-        model_row[AutoEvalColumn.npm.name] = round(model_data['result_metrics_npm']*100, 2)
-
-        model_type = ModelType.from_str(model_data['model_type'])
-        model_row[AutoEvalColumn.model_type.name] = model_type.name
-        model_row[AutoEvalColumn.model_type_symbol.name] = model_type.symbol
-        if model_type == ModelType.proprietary:
-            model_row[AutoEvalColumn.license.name] = "Proprietary"
-        if 'params' in model_data:
-            model_row[AutoEvalColumn.params.name] = model_data['params']
-
-        model_row[AutoEvalColumn.main_language.name] = model_data['main_language']
-        external_rows.append(model_row)
-
 @dataclass
 class ModelDetails:
     name: str
@@ -273,6 +248,31 @@ class Language(Enum):
             return Language.Other
         return Language.Unknown
 
+#External models
+external_rows = []
+if os.path.exists('external_models_results.json'):
+    with open('external_models_results.json', 'r', encoding='utf8') as f:
+        all_models = json.load(f)
+    for model_data in all_models:
+        model_row = deepcopy(baseline_row)
+        model_row[AutoEvalColumn.model.name] = f'<a target="_blank" href="{model_data["link"]}" style="color: var(--text-color); text-decoration: underline;text-decoration-style: dotted;">{model_data["name"]} [{model_data["date"]}]</a>'
+        model_row[AutoEvalColumn.dummy.name] = model_data['model']
+        for task in Tasks:
+            model_row[task.value.col_name] = round(model_data['result_metrics'][task.value.benchmark]*100, 2)
+        model_row[AutoEvalColumn.average.name] = round(model_data['result_metrics_average']*100, 2)
+        model_row[AutoEvalColumn.npm.name] = round(model_data['result_metrics_npm']*100, 2)
+
+        model_type = ModelType.from_str(model_data['model_type'])
+        model_row[AutoEvalColumn.model_type.name] = model_type.name
+        model_row[AutoEvalColumn.model_type_symbol.name] = model_type.symbol
+        if model_type == ModelType.proprietary:
+            model_row[AutoEvalColumn.license.name] = "Proprietary"
+        if 'params' in model_data:
+            model_row[AutoEvalColumn.params.name] = model_data['params']
+
+        model_row[AutoEvalColumn.main_language.name] = model_data['main_language']
+        external_rows.append(model_row)
+
 
 # Column selection
 COLS = [c.name for c in fields(AutoEvalColumn)]
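For reference, a sketch of what a single entry in external_models_results.json appears to need, judging only from the keys the loop above reads. The key names come from the diff; every value is invented for illustration, and the real file may carry additional fields:

import json

# Hypothetical entry: field names are taken from the diff above,
# values are placeholders, and 'params' is optional in the loop.
sample_entry = {
    "name": "example-model",
    "link": "https://example.com/example-model",
    "date": "2024-01",
    "model": "example-org/example-model",
    "model_type": "proprietary",      # fed to ModelType.from_str
    "main_language": "Portuguese",
    "params": 7,                      # only read when present
    "result_metrics": {               # keyed by each task's benchmark id
        "example_benchmark": 0.75,    # raw scores; the loop scales by *100
    },
    "result_metrics_average": 0.75,
    "result_metrics_npm": 0.60,
}

with open("external_models_results.json", "w", encoding="utf8") as f:
    json.dump([sample_entry], f, ensure_ascii=False, indent=2)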