Commit d2179b0 · Parent(s): ed1fdef
Add a model size filter ✨ (#218)
- Add a model size filter ✨ (64f1a6e6406aec4f07846a78fe8528b3c1d71c4c)
- New style 😎 (31944c88cfa84efcfedcd257f460932059d08ca6)
Co-authored-by: Apolinário from multimodal AI art <[email protected]>
- app.py +72 -16
- src/assets/css_html_js.py +33 -1
app.py
CHANGED
@@ -294,7 +294,30 @@ def filter_items(df, leaderboard_table, query):
     if AutoEvalColumn.model_type_symbol.name in leaderboard_table.columns:
         filtered_df = df[(df[AutoEvalColumn.model_type_symbol.name] == query)]
     else:
-        return leaderboard_table.columns
+        return filtered_df[leaderboard_table.columns]
+    return filtered_df[leaderboard_table.columns]
+
+def filter_items_size(df, leaderboard_table, query):
+    numeric_intervals = {
+        "all": None,
+        "< 1B": (0, 1),
+        "~3B": (1, 5),
+        "~7B": (6, 11),
+        "~13B": (12, 15),
+        "~35B": (16, 55),
+        "60B+": (55, 1000)
+    }
+
+    if query == "all":
+        return df[leaderboard_table.columns]
+
+    numeric_interval = numeric_intervals[query]
+
+    if AutoEvalColumn.params.name in leaderboard_table.columns:
+        params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors='coerce')
+        filtered_df = df[params_column.between(*numeric_interval)]
+    else:
+        return filtered_df[leaderboard_table.columns]
     return filtered_df[leaderboard_table.columns]
 
 def change_tab(query_param):
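For reference, a minimal, self-contained sketch of the interval-filter idiom used by filter_items_size above. The toy DataFrame and its "#Params (B)" column are made-up stand-ins for illustration, not the leaderboard's actual schema:

import pandas as pd

# Toy table with a string-typed size column, standing in for the leaderboard data.
df = pd.DataFrame({"model": ["a", "b", "c"], "#Params (B)": ["0.5", "7", "70"]})

interval = (6, 11)  # the "~7B" bucket from the dict above
params = pd.to_numeric(df["#Params (B)"], errors="coerce")  # unparsable values become NaN
filtered = df[params.between(*interval)]  # between() is inclusive on both ends
print(filtered)  # only model "b" (7B) survives the filter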
@@ -310,6 +333,10 @@ def change_tab(query_param):
     else:
         return gr.Tabs.update(selected=0)
 
+def update_filter_type(input_type, shown_columns):
+    shown_columns.append(AutoEvalColumn.params.name)
+    return gr.update(visible=(input_type == 'types')), gr.update(visible=(input_type == 'sizes')), shown_columns
+
 
 demo = gr.Blocks(css=custom_css)
 with demo:
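As an aside, a minimal sketch of the pattern update_filter_type relies on in Gradio 3.x: the callback returns one gr.update per output component, here toggling visibility from a dropdown. Component names below are illustrative, not the app's:

import gradio as gr

def toggle(choice):
    # One update per entry in the outputs list, in the same order.
    return gr.update(visible=(choice == "a")), gr.update(visible=(choice == "b"))

with gr.Blocks() as demo:
    selector = gr.Dropdown(choices=["a", "b"], value="a")
    box_a = gr.Textbox(label="visible when 'a' is selected")
    box_b = gr.Textbox(label="visible when 'b' is selected", visible=False)
    selector.change(toggle, inputs=selector, outputs=[box_a, box_b])

# demo.launch()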
@@ -332,18 +359,44 @@ with demo:
                show_label=False,
                elem_id="search-bar",
            )
-
-
-
-
-
-
-
-
-
-
-
-
+            with gr.Box(elem_id="box-filter"):
+                filter_type = gr.Dropdown(
+                    label="⏚ Filter model",
+                    choices=["types", "sizes"], value="types",
+                    interactive=True,
+                    elem_id="filter_type"
+                )
+                filter_columns = gr.Radio(
+                    label="⏚ Filter model types",
+                    show_label=False,
+                    choices = [
+                        "all",
+                        ModelType.PT.to_str(),
+                        ModelType.FT.to_str(),
+                        ModelType.IFT.to_str(),
+                        ModelType.RL.to_str(),
+                    ],
+                    value="all",
+                    elem_id="filter-columns"
+                )
+                filter_columns_size = gr.Radio(
+                    label="⏚ Filter model sizes",
+                    show_label=False,
+                    choices = [
+                        "all",
+                        "< 1B",
+                        "~3B",
+                        "~7B",
+                        "~13B",
+                        "~35B",
+                        "60B+"
+                    ],
+                    value="all",
+                    visible=False,
+                    interactive=True,
+                    elem_id="filter-columns-size"
+                )
+
            leaderboard_table = gr.components.Dataframe(
                value=leaderboard_df[[AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name] + shown_columns.value + [AutoEvalColumn.dummy.name]],
                headers=[AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name] + shown_columns.value + [AutoEvalColumn.dummy.name],
@@ -367,8 +420,11 @@ with demo:
                [hidden_leaderboard_table_for_search, leaderboard_table, search_bar],
                leaderboard_table,
            )
-
-
+
+            filter_type.change(update_filter_type,inputs=[filter_type, shown_columns],outputs=[filter_columns, filter_columns_size, shown_columns],queue=False).then(select_columns, [hidden_leaderboard_table_for_search, shown_columns], leaderboard_table, queue=False)
+            shown_columns.change(select_columns, [hidden_leaderboard_table_for_search, shown_columns], leaderboard_table, queue=False)
+            filter_columns.change(filter_items, [hidden_leaderboard_table_for_search, leaderboard_table, filter_columns], leaderboard_table, queue=False)
+            filter_columns_size.change(filter_items_size, [hidden_leaderboard_table_for_search, leaderboard_table, filter_columns_size], leaderboard_table, queue=False)
        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
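For reference, a minimal sketch of the event chaining used above, where .change(...) runs one callback and .then(...) queues a follow-up on the same trigger, in the Gradio 3.x style this app uses. All names below are illustrative:

import gradio as gr

def step_one(text):
    return text.upper()

def step_two(text):
    return f"{text}!"

with gr.Blocks() as demo:
    inp = gr.Textbox(label="input")
    out = gr.Textbox(label="output")
    # step_one runs first; step_two runs after it completes, reusing its output.
    inp.change(step_one, inputs=inp, outputs=out, queue=False).then(
        step_two, inputs=out, outputs=out, queue=False
    )

# demo.launch()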
@@ -495,4 +551,4 @@ with demo:
 scheduler = BackgroundScheduler()
 scheduler.add_job(restart_space, "interval", seconds=3600)
 scheduler.start()
-demo.queue(concurrency_count=40).launch()
+demo.queue(concurrency_count=40).launch()
src/assets/css_html_js.py
CHANGED
@@ -68,6 +68,38 @@ table th:first-child {
 #scale-logo .download {
     display: none;
 }
+#filter_type{
+    border: 0;
+    padding-left: 0;
+    padding-top: 0;
+}
+#filter_type label {
+    display: flex;
+}
+#filter_type label > span{
+    margin-top: var(--spacing-lg);
+    margin-right: 0.5em;
+}
+#filter_type label > .wrap{
+    width: 103px;
+}
+#filter_type label > .wrap .wrap-inner{
+    padding: 2px;
+}
+#filter_type label > .wrap .wrap-inner input{
+    width: 1px
+}
+#filter-columns{
+    border:0;
+    padding:0;
+}
+#filter-columns-size{
+    border:0;
+    padding:0;
+}
+#box-filter > .form{
+    border: 0
+}
 """
 
 get_window_url_params = """
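These selectors match the elem_id values set on the new components in app.py ("filter_type", "filter-columns", "filter-columns-size", "box-filter"), since Gradio exposes elem_id as an HTML id that the stylesheet passed to gr.Blocks(css=...) can target. A minimal sketch of that pattern, with a made-up one-rule stylesheet rather than the file above:

import gradio as gr

# Hypothetical stylesheet targeting a component by its elem_id.
custom_css = """
#filter_type { border: 0; }
"""

with gr.Blocks(css=custom_css) as demo:
    gr.Dropdown(choices=["types", "sizes"], value="types", elem_id="filter_type")

# demo.launch()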
@@ -76,4 +108,4 @@ get_window_url_params = """
     url_params = Object.fromEntries(params);
     return url_params;
 }
-"""
+"""