[Feat] add fail category (#23)
- [Feat] add fail category (e2385abad6153e453d7df66ad899d879d90bafea)
- Update src/load_from_hub.py (336d66cf54b2cc54a4d6ff161b4a3b9d7922af9b)
- Update app.py (5c4b2a2e52341e94de7931228c6ae3a90f83de3c)
Co-authored-by: HyeonwooKim <[email protected]>
- app.py +12 -0
- src/load_from_hub.py +4 -1
app.py
CHANGED
@@ -117,6 +117,7 @@ leaderboard_df = original_df.copy()
     finished_eval_queue_df,
     running_eval_queue_df,
     pending_eval_queue_df,
+    failed_eval_queue_df,
 ) = get_evaluation_queue_df(eval_queue, eval_queue_private, EVAL_REQUESTS_PATH, EVAL_COLS)

 ## INTERACTION FUNCTIONS
@@ -513,6 +514,17 @@ with demo:
                         datatype=EVAL_TYPES,
                         max_rows=5,
                     )
+            with gr.Accordion(
+                f"❌ Failed Evaluations ({len(failed_eval_queue_df)})",
+                open=False,
+            ):
+                with gr.Row():
+                    pending_eval_table = gr.components.Dataframe(
+                        value=failed_eval_queue_df,
+                        headers=EVAL_COLS,
+                        datatype=EVAL_TYPES,
+                        max_rows=5,
+                    )
     with gr.Row():
         gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")

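For illustration only, a self-contained sketch of what the added block renders: a collapsed accordion holding a table of failed evaluation requests. This is not the Space's code; the columns and rows are invented, and it assumes the Gradio 3.x-style API the Space already uses (gr.components.Dataframe with max_rows). Note the diff reuses the variable name pending_eval_table for the new table; the sketch uses failed_eval_table to keep it distinct from the pending table.

# Standalone sketch (not the Space's code): a collapsed accordion with a table
# of failed evaluation requests. Columns and rows are invented for illustration.
import gradio as gr
import pandas as pd

EVAL_COLS = ["model", "revision", "status"]  # hypothetical column set
failed_eval_queue_df = pd.DataFrame(
    [{"model": "org/model-a", "revision": "main", "status": "FAILED"}],
    columns=EVAL_COLS,
)

with gr.Blocks() as demo:
    with gr.Accordion(
        f"❌ Failed Evaluations ({len(failed_eval_queue_df)})",
        open=False,
    ):
        with gr.Row():
            failed_eval_table = gr.components.Dataframe(
                value=failed_eval_queue_df,
                headers=EVAL_COLS,
                datatype=["str", "str", "str"],
                max_rows=5,  # older Gradio Dataframe argument, as used in the diff
            )

if __name__ == "__main__":
    demo.launch()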
src/load_from_hub.py
CHANGED
@@ -123,10 +123,13 @@ def get_evaluation_queue_df(
     pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
     running_list = [e for e in all_evals if e["status"] == "RUNNING"]
     finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
+    failed_list = [e for e in all_evals if e["status"] == "FAILED"]
     df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
     df_running = pd.DataFrame.from_records(running_list, columns=cols)
     df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
-    return df_finished[cols], df_running[cols], df_pending[cols]
+    df_failed = pd.DataFrame.from_records(failed_list, columns=cols)
+    return df_finished[cols], df_running[cols], df_pending[cols], df_failed[cols]
+


 def is_model_on_hub(model_name: str, revision: str) -> bool:
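A minimal sketch of the new filtering logic with invented records; the real get_evaluation_queue_df builds all_evals from the request files under EVAL_REQUESTS_PATH and uses the leaderboard's own column list, so the toy columns and entries below are assumptions for illustration.

# Toy reproduction of the FAILED filter; not the Space's code.
import pandas as pd

cols = ["model", "status"]  # hypothetical columns
all_evals = [
    {"model": "org/model-a", "status": "PENDING"},
    {"model": "org/model-b", "status": "RUNNING"},
    {"model": "org/model-c", "status": "FINISHED"},
    {"model": "org/model-d", "status": "FAILED"},
]

pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
running_list = [e for e in all_evals if e["status"] == "RUNNING"]
finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
failed_list = [e for e in all_evals if e["status"] == "FAILED"]

df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
df_running = pd.DataFrame.from_records(running_list, columns=cols)
df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
df_failed = pd.DataFrame.from_records(failed_list, columns=cols)

# The function now returns four dataframes; callers such as app.py unpack all four.
print(df_failed)  # single row: org/model-d, FAILED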