Spaces: Running on CPU Upgrade

corrected leaderboard code
Browse files

- .DS_Store +0 -0
- Makefile +13 -0
- README.md +29 -6
- app.py +180 -151
- app_empty.py +0 -7
- backend-cli.py +0 -187
- manage_repos.ipynb +0 -226
- pyproject.toml +13 -0
- requirements.txt +15 -31
- scripts/create_request_file.py +107 -0
- src/.DS_Store +0 -0
- src/backend/.DS_Store +0 -0
- src/backend/envs.py +0 -54
- src/backend/manage_requests.py +0 -140
- src/backend/run_eval_suite.py +0 -75
- src/backend/sort_queue.py +0 -28
- src/display/about.py +72 -12
- src/display/css_html_js.py +0 -4
- src/display/formatting.py +0 -6
- src/display/utils.py +9 -36
- src/envs.py +10 -29
- src/leaderboard/filter_models.py +0 -50
- src/leaderboard/read_evals.py +37 -68
- src/populate.py +8 -41
- src/submission/check_validity.py +28 -119
- src/submission/submit.py +10 -83
- src/utils.py +0 -29
.DS_Store
CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
Makefile
ADDED
@@ -0,0 +1,13 @@
.PHONY: style format


style:
	python -m black --line-length 119 .
	python -m isort .
	ruff check --fix .


quality:
	python -m black --check --line-length 119 .
	python -m isort --check-only .
	ruff check .
README.md
CHANGED
@@ -1,13 +1,36 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
 sdk: gradio
-sdk_version: 4.
 app_file: app.py
-pinned:
 license: apache-2.0
 ---

 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

 ---
+title: Demo Leaderboard
+emoji: 🥇
+colorFrom: green
+colorTo: indigo
 sdk: gradio
+sdk_version: 4.4.0
 app_file: app.py
+pinned: true
 license: apache-2.0
 ---

 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+Most of the variables to change for a default leaderboard are in env (replace the path for your leaderboard) and src/display/about.
+
+Results files should have the following format:
+```
+{
+    "config": {
+        "model_dtype": "torch.float16", # or torch.bfloat16 or 8bit or 4bit
+        "model_name": "path of the model on the hub: org/model",
+        "model_sha": "revision on the hub",
+    },
+    "results": {
+        "task_name": {
+            "metric_name": score,
+        },
+        "task_name2": {
+            "metric_name": score,
+        }
+    }
+}
+```
+
+Request files are created automatically by this tool.
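For reference, a minimal sketch of producing a results file in the format documented above — the model name, revision, task names, scores, and output path below are placeholders, not values taken from this Space:

```python
import json
from pathlib import Path

# Hypothetical evaluation output for one model (placeholder values).
results = {
    "config": {
        "model_dtype": "torch.float16",
        "model_name": "my-org/my-model",  # path of the model on the hub: org/model
        "model_sha": "main",              # revision on the hub
    },
    "results": {
        "task_name": {"metric_name": 0.52},
        "task_name2": {"metric_name": 0.67},
    },
}

# The leaderboard reads results from the results dataset repo; the exact layout
# (folder per org/model, file naming) is configured in src/envs.py.
out_path = Path("eval-results/my-org/my-model/results_demo.json")
out_path.parent.mkdir(parents=True, exist_ok=True)
out_path.write_text(json.dumps(results, indent=2))
```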
app.py
CHANGED
@@ -1,9 +1,47 @@
 import gradio as gr
 import pandas as pd
-
 from apscheduler.schedulers.background import BackgroundScheduler

-from src.display.css_html_js import custom_css

 from src.display.about import (
     CITATION_BUTTON_LABEL,
@@ -11,11 +49,9 @@ from src.display.about import (
     EVALUATION_QUEUE_TEXT,
     INTRODUCTION_TEXT,
     LLM_BENCHMARKS_TEXT,
-    LLM_BENCHMARKS_DETAILS,
-    FAQ_TEXT,
     TITLE,
 )
-
 from src.display.utils import (
     BENCHMARK_COLS,
     COLS,
@@ -29,41 +65,56 @@ from src.display.utils import (
     WeightType,
     Precision
 )
-
 from src.populate import get_evaluation_queue_df, get_leaderboard_df
-
-from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, H4_TOKEN, IS_PUBLIC, QUEUE_REPO, REPO_ID, RESULTS_REPO
 from src.submission.submit import add_new_eval

-from src.display.utils import Tasks
-
-from huggingface_hub import snapshot_download
-
-## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## -------##

 def restart_space():
-    API.restart_space(repo_id=REPO_ID, token=

-def ui_snapshot_download(repo_id, local_dir, repo_type, tqdm_class, etag_timeout):
-    try:
-        print(f"local_dir for snapshot download = {local_dir}")
-        snapshot_download(repo_id=repo_id, local_dir=local_dir, repo_type=repo_type, tqdm_class=tqdm_class, etag_timeout=etag_timeout)
-    except Exception:
-        print(f"ui_snapshot_download failed. restarting space...")
-        restart_space()

 # Searching and filtering
-def update_table(
-
-
     filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted)
-
-    print(f"filtered_df = {filtered_df}")
     filtered_df = filter_queries(query, filtered_df)
     df = select_columns(filtered_df, columns)
-    print(f"df = {df}")
     return df

 def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
     return df[(df[AutoEvalColumn.dummy.name].str.contains(query, case=False))]

@@ -79,7 +130,8 @@ def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
     ]
     return filtered_df

-
     final_df = []
     if query != "":
         queries = [q.strip() for q in query.split(";")]
@@ -98,22 +150,18 @@ def filter_queries(query: str, filtered_df: pd.DataFrame):
     return filtered_df


-def filter_models(
-
-
-    print("aa this is an example", df)
-    print(f"filter_models()'s df: {df}\n")
     # Show all models
-
-
-
-

     type_emoji = [t[0] for t in type_query]
-    print("aa this is an example", df, AutoEvalColumn.model_type_symbol.name, "thhhthht")
-    print("type", type_emoji)
     filtered_df = filtered_df.loc[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
-    print("bb", filtered_df)
     filtered_df = filtered_df.loc[df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])]

     numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))
@@ -124,34 +172,21 @@ def filter_models(df: pd.DataFrame, type_query: list, size_query: list, precisio
     return filtered_df


-## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## -------
-
-ui_snapshot_download(repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30)
-ui_snapshot_download(repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30)
-
-print(f"COLS = {COLS}")
-
-
-raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS) # k the problem is that the results are only saved in _bk dirs.
-leaderboard_df = original_df.copy()
-print(f"leaderboard_df = {leaderboard_df}")
-
-
-################################################################################################################################
 demo = gr.Blocks(css=custom_css)
 with demo:
     gr.HTML(TITLE)
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

     with gr.Tabs(elem_classes="tab-buttons") as tabs:
-
-        # toggle break 1: this tab just RENDERS existing result files on remote repo.
-        with gr.TabItem("Benchmarks", elem_id="llm-benchmark-tab-table", id=0):
-
             with gr.Row():
                 with gr.Column():
                     with gr.Row():
-                        search_bar = gr.Textbox(
                     with gr.Row():
                         shown_columns = gr.CheckboxGroup(
                             choices=[
@@ -168,8 +203,12 @@ with demo:
                             elem_id="column-select",
                             interactive=True,
                         )
-
                 with gr.Column(min_width=320):
                     filter_columns_type = gr.CheckboxGroup(
                         label="Model types",
                         choices=[t.to_str() for t in ModelType],
@@ -192,41 +231,41 @@ with demo:
                         elem_id="filter-columns-size",
                     )

-            # leaderboard_table = gr.components.Dataframe(
-            #     value=leaderboard_df[
-            #         [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
-            #         + shown_columns.value
-            #         + [AutoEvalColumn.dummy.name]
-            #     ] if leaderboard_df.empty is False else leaderboard_df,
-            #     headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value,
-            #     datatype=TYPES,
-            #     elem_id="leaderboard-table",
-            #     interactive=False,
-            #     visible=True,
-            #     column_widths=["2%", "20%"]
-            # )
             leaderboard_table = gr.components.Dataframe(
-                # value=leaderboard_df,
                 value=leaderboard_df[
                     [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
                     + shown_columns.value
                     + [AutoEvalColumn.dummy.name]
-                ]
                 headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value,
                 datatype=TYPES,
                 elem_id="leaderboard-table",
                 interactive=False,
                 visible=True,
-
             )
             # Dummy leaderboard for handling the case when the user uses backspace key
             hidden_leaderboard_table_for_search = gr.components.Dataframe(
-                value=original_df[COLS]
                 headers=COLS,
                 datatype=TYPES,
-                visible=False
             )
-
             selector.change(
                 update_table,
                 [
@@ -235,62 +274,63 @@ with demo:
                     filter_columns_type,
                     filter_columns_precision,
                     filter_columns_size,
                     search_bar,
                 ],
                 leaderboard_table,
                 queue=True,
             )

-
         with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-[39 removed lines not rendered in the diff view]
             with gr.Row():
                 gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")

             with gr.Row():
                 with gr.Column():
                     model_name_textbox = gr.Textbox(label="Model name")
-                    # You can use the revision parameter to point to the specific commit hash when downloading.
                     revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    private = gr.Checkbox(False, label="Private", visible=not IS_PUBLIC)
                     model_type = gr.Dropdown(
                         choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
                         label="Model type",
@@ -304,7 +344,7 @@ with demo:
                         choices=[i.value.name for i in Precision if i != Precision.Unknown],
                         label="Precision",
                         multiselect=False,
-                        value="
                         interactive=True,
                     )
                     weight_type = gr.Dropdown(
@@ -314,51 +354,40 @@ with demo:
                         value="Original",
                         interactive=True,
                     )
-
-
-                    requested_tasks = gr.CheckboxGroup(
-                        choices=[ (i.value.col_name, i.value) for i in Tasks],
-
-                        label="Select tasks",
-                        elem_id="task-select",
-                        interactive=True,
-                    )
-
-
                     base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")

             submit_button = gr.Button("Submit Eval")
             submission_result = gr.Markdown()
-
-            # we need to add task specification argument here as well.
             submit_button.click(
                 add_new_eval,
                 [
                     model_name_textbox,
-
-                    requested_tasks, # is this a list of str or class Task? i think it's Task.
-
                     base_model_name_textbox,
                     revision_name_textbox,
                     precision,
-                    private,
                     weight_type,
                     model_type,
                 ],
-                submission_result
-
-
-
-            # demo.launch()

-

 scheduler = BackgroundScheduler()
-
-scheduler.add_job(restart_space, "interval", seconds=6 * 60 * 60)
-
 scheduler.start()
-

-#
-
 import gradio as gr
 import pandas as pd
 from apscheduler.schedulers.background import BackgroundScheduler
+from huggingface_hub import snapshot_download
+
+# import os
+
+# # Define the folders to delete
+# folders_to_delete = ['eval-results', 'eval-queue']
+
+# import shutil
+
+# # Delete the folders and their contents
+# deleted_folders = []
+# nonexistent_folders = []
+
+# for folder in folders_to_delete:
+#     if os.path.exists(folder) and os.path.isdir(folder):
+#         shutil.rmtree(folder)  # This removes the directory and its contents
+#         deleted_folders.append(folder)
+#     else:
+#         nonexistent_folders.append(folder)
+
+
+
+# import subprocess
+# import signal
+
+# # Find and kill processes running on port 7878
+# try:
+#     # Find process using port 7878
+#     output = subprocess.check_output(["lsof", "-ti", "tcp:7878"]).decode().strip()
+#     if output:
+#         # Split the output in case there are multiple PIDs
+#         pids = output.split('\n')
+#         for pid in pids:
+#             # Kill each process
+#             os.kill(int(pid), signal.SIGKILL)
+#         result = "Processes running on port 7878 have been killed."
+#     else:
+#         result = "No processes are running on port 7878."
+# except Exception as e:
+#     result = f"An error occurred: {str(e)}"


 from src.display.about import (
     CITATION_BUTTON_LABEL,
     EVALUATION_QUEUE_TEXT,
     INTRODUCTION_TEXT,
     LLM_BENCHMARKS_TEXT,
     TITLE,
 )
+from src.display.css_html_js import custom_css
 from src.display.utils import (
     BENCHMARK_COLS,
     COLS,
     WeightType,
     Precision
 )
+from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, TOKEN, QUEUE_REPO, REPO_ID, RESULTS_REPO
 from src.populate import get_evaluation_queue_df, get_leaderboard_df
 from src.submission.submit import add_new_eval


 def restart_space():
+    API.restart_space(repo_id=REPO_ID, token=TOKEN)
+
+try:
+    print(EVAL_REQUESTS_PATH)
+    snapshot_download(
+        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
+    )
+except Exception:
+    restart_space()
+try:
+    print(EVAL_RESULTS_PATH)
+    snapshot_download(
+        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
+    )
+except Exception:
+    restart_space()
+
+
+raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
+leaderboard_df = original_df.copy()
+
+(
+    finished_eval_queue_df,
+    running_eval_queue_df,
+    pending_eval_queue_df,
+) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)


 # Searching and filtering
+def update_table(
+    hidden_df: pd.DataFrame,
+    columns: list,
+    type_query: list,
+    precision_query: str,
+    size_query: list,
+    show_deleted: bool,
+    query: str,
+):
     filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted)
     filtered_df = filter_queries(query, filtered_df)
     df = select_columns(filtered_df, columns)
     return df

+
 def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
     return df[(df[AutoEvalColumn.dummy.name].str.contains(query, case=False))]

     ]
     return filtered_df

+
+def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
     final_df = []
     if query != "":
         queries = [q.strip() for q in query.split(";")]
     return filtered_df


+def filter_models(
+    df: pd.DataFrame, type_query: list, size_query: list, precision_query: list, show_deleted: bool
+) -> pd.DataFrame:
     # Show all models
+    filtered_df = df
+    # if show_deleted:
+    #     filtered_df = df
+    # else: # Show only still on the hub models
+    #     filtered_df = df[df[AutoEvalColumn.still_on_hub.name] == True]

     type_emoji = [t[0] for t in type_query]
     filtered_df = filtered_df.loc[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
     filtered_df = filtered_df.loc[df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])]

     numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))
     return filtered_df


 demo = gr.Blocks(css=custom_css)
 with demo:
     gr.HTML(TITLE)
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

     with gr.Tabs(elem_classes="tab-buttons") as tabs:
+        with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
             with gr.Row():
                 with gr.Column():
                     with gr.Row():
+                        search_bar = gr.Textbox(
+                            placeholder=" 🔍 Search for your model (separate multiple queries with `;`) and press ENTER...",
+                            show_label=False,
+                            elem_id="search-bar",
+                        )
                     with gr.Row():
                         shown_columns = gr.CheckboxGroup(
                             choices=[
                             elem_id="column-select",
                             interactive=True,
                         )
+                    with gr.Row():
+                        deleted_models_visibility = gr.Checkbox(
+                            value=True, label="Show gated/private/deleted models", interactive=True
+                        )
                 with gr.Column(min_width=320):
+                    #with gr.Box(elem_id="box-filter"):
                     filter_columns_type = gr.CheckboxGroup(
                         label="Model types",
                         choices=[t.to_str() for t in ModelType],
                         elem_id="filter-columns-size",
                     )

             leaderboard_table = gr.components.Dataframe(
                 value=leaderboard_df[
                     [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
                     + shown_columns.value
                     + [AutoEvalColumn.dummy.name]
+                ],
                 headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value,
                 datatype=TYPES,
                 elem_id="leaderboard-table",
                 interactive=False,
                 visible=True,
+                column_widths=["2%", "33%"]
             )
+
             # Dummy leaderboard for handling the case when the user uses backspace key
             hidden_leaderboard_table_for_search = gr.components.Dataframe(
+                value=original_df[COLS],
                 headers=COLS,
                 datatype=TYPES,
+                visible=False,
             )
+            search_bar.submit(
+                update_table,
+                [
+                    hidden_leaderboard_table_for_search,
+                    shown_columns,
+                    filter_columns_type,
+                    filter_columns_precision,
+                    filter_columns_size,
+                    deleted_models_visibility,
+                    search_bar,
+                ],
+                leaderboard_table,
+            )
+            for selector in [shown_columns, filter_columns_type, filter_columns_precision, filter_columns_size, deleted_models_visibility]:
                 selector.change(
                     update_table,
                     [
                         filter_columns_type,
                         filter_columns_precision,
                         filter_columns_size,
+                        deleted_models_visibility,
                         search_bar,
                     ],
                     leaderboard_table,
                     queue=True,
                 )

+        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
+            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
+
         with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+            with gr.Column():
+                with gr.Row():
+                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+
+                with gr.Column():
+                    with gr.Accordion(
+                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+                        open=False,
+                    ):
+                        with gr.Row():
+                            finished_eval_table = gr.components.Dataframe(
+                                value=finished_eval_queue_df,
+                                headers=EVAL_COLS,
+                                datatype=EVAL_TYPES,
+                                row_count=5,
+                            )
+                    with gr.Accordion(
+                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+                        open=False,
+                    ):
+                        with gr.Row():
+                            running_eval_table = gr.components.Dataframe(
+                                value=running_eval_queue_df,
+                                headers=EVAL_COLS,
+                                datatype=EVAL_TYPES,
+                                row_count=5,
+                            )
+
+                    with gr.Accordion(
+                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+                        open=False,
+                    ):
+                        with gr.Row():
+                            pending_eval_table = gr.components.Dataframe(
+                                value=pending_eval_queue_df,
+                                headers=EVAL_COLS,
+                                datatype=EVAL_TYPES,
+                                row_count=5,
+                            )
             with gr.Row():
                 gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")

             with gr.Row():
                 with gr.Column():
                     model_name_textbox = gr.Textbox(label="Model name")
                     revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
                     model_type = gr.Dropdown(
                         choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
                         label="Model type",
                         choices=[i.value.name for i in Precision if i != Precision.Unknown],
                         label="Precision",
                         multiselect=False,
+                        value="float16",
                         interactive=True,
                     )
                     weight_type = gr.Dropdown(
                         value="Original",
                         interactive=True,
                     )
                     base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")

             submit_button = gr.Button("Submit Eval")
             submission_result = gr.Markdown()
             submit_button.click(
                 add_new_eval,
                 [
                     model_name_textbox,
                     base_model_name_textbox,
                     revision_name_textbox,
                     precision,
                     weight_type,
                     model_type,
                 ],
+                submission_result,
+            )

+    with gr.Row():
+        with gr.Accordion("📙 Citation", open=False):
+            citation_button = gr.Textbox(
+                value=CITATION_BUTTON_TEXT,
+                label=CITATION_BUTTON_LABEL,
+                lines=20,
+                elem_id="citation-button",
+                show_copy_button=True,
+            )

 scheduler = BackgroundScheduler()
+scheduler.add_job(restart_space, "interval", seconds=1800)
 scheduler.start()
+demo.queue(default_concurrency_limit=40).launch()
+

+# scheduler = BackgroundScheduler()
+# scheduler.add_job(restart_space, "interval", seconds=6 * 60 * 60)
+# scheduler.start()
+# demo.queue().launch()
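As a rough illustration of the search-and-filter pipeline in the new app.py (update_table chaining filter_models, filter_queries, and column selection), here is a self-contained toy version; the column names and data are hypothetical, and the real app operates on AutoEvalColumn fields instead:

```python
import pandas as pd

# Toy stand-in for the hidden full leaderboard table.
hidden_df = pd.DataFrame(
    {
        "model": ["org/model-a", "org/model-b", "other/model-c"],
        "type_symbol": ["🟢", "🔶", "🟢"],
        "precision": ["float16", "bfloat16", "float16"],
        "average": [61.2, 58.9, 64.0],
    }
)

def filter_models(df: pd.DataFrame, type_query: list, precision_query: list) -> pd.DataFrame:
    # Keep rows whose type symbol and precision are among the selected filters.
    out = df[df["type_symbol"].isin(type_query)]
    return out[out["precision"].isin(precision_query)]

def filter_queries(query: str, df: pd.DataFrame) -> pd.DataFrame:
    # Semicolon-separated substring search over the model name, as in the search bar.
    if not query:
        return df
    parts = [df[df["model"].str.contains(q.strip(), case=False)] for q in query.split(";")]
    return pd.concat(parts).drop_duplicates()

def update_table(df: pd.DataFrame, columns: list, type_query: list, precision_query: list, query: str) -> pd.DataFrame:
    filtered = filter_models(df, type_query, precision_query)
    filtered = filter_queries(query, filtered)
    return filtered[columns]

print(update_table(hidden_df, ["model", "average"], ["🟢"], ["float16"], "model-a; model-c"))
```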
app_empty.py
DELETED
@@ -1,7 +0,0 @@
import gradio as gr

def greet(name):
    return "Hello " + name + "!!"

# iface = gr.Interface(fn=greet, inputs="text", outputs="text")
# iface.launch()
backend-cli.py
DELETED
@@ -1,187 +0,0 @@
#!/usr/bin/env python

import os
import json

import random
from datetime import datetime

from src.backend.run_eval_suite import run_evaluation
from src.backend.manage_requests import check_completed_evals, get_eval_requests, set_eval_request
from src.backend.sort_queue import sort_models_by_priority


from src.backend.envs import EVAL_REQUESTS_PATH_BACKEND, EVAL_RESULTS_PATH_BACKEND, DEVICE, LIMIT, Tasks, Task, num_fewshots

from src.backend.manage_requests import EvalRequest
from src.leaderboard.read_evals import EvalResult

from src.envs import QUEUE_REPO, RESULTS_REPO, API
from src.utils import my_snapshot_download

import time

import logging
import pprint
import argparse


# def get_subdirectories(path):
#     subdirectories = []
#     # Get all entries in the directory
#     entries = os.listdir(path)
#     for entry in entries:
#         # Check if the entry is a directory
#         if os.path.isdir(os.path.join(path, entry)):
#             subdirectories.append(entry)
#     return subdirectories

# parser = argparse.ArgumentParser(description="Get subdirectory names")
# parser.add_argument("include_path", help="Path to the directory", nargs='?', default=None)
# args = parser.parse_args()

# # = get_subdirectories(args.include_path)




def my_set_eval_request(api, eval_request, set_to_status, hf_repo, local_dir):
    for i in range(10):
        try:
            set_eval_request(api=api, eval_request=eval_request, set_to_status=set_to_status, hf_repo=hf_repo, local_dir=local_dir)
            return
        except Exception:
            time.sleep(60)
    return


logging.getLogger("openai").setLevel(logging.WARNING)

logging.basicConfig(level=logging.ERROR)
pp = pprint.PrettyPrinter(width=80)

PENDING_STATUS = "PENDING"
RUNNING_STATUS = "RUNNING"
FINISHED_STATUS = "FINISHED"
FAILED_STATUS = "FAILED"

TASKS_HARNESS = [task.value for task in Tasks]

# starts by downloading results and requests. makes sense since we want to be able to use different backend servers!
my_snapshot_download(repo_id=RESULTS_REPO, revision="main", local_dir=EVAL_RESULTS_PATH_BACKEND, repo_type="dataset", max_workers=60)
my_snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60)


def sanity_checks():
    print(f'Device: {DEVICE}')

    # pull the eval dataset from the hub and parse any eval requests
    # check completed evals and set them to finished
    my_snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60)
    check_completed_evals(api=API, checked_status=RUNNING_STATUS, completed_status=FINISHED_STATUS,
                          failed_status=FAILED_STATUS, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND,
                          hf_repo_results=RESULTS_REPO, local_dir_results=EVAL_RESULTS_PATH_BACKEND)
    return


def request_to_result_name(request: EvalRequest) -> str:

    org_and_model = request.model.split("/", 1)
    if len(org_and_model) == 1:
        model = org_and_model[0]
        res = f"{model}_{request.precision}"
    else:
        org = org_and_model[0]
        model = org_and_model[1]
        res = f"{org}_{model}_{request.precision}"
    return res

# doesn't make distinctions for tasks since the original code runs eval on ALL tasks.
def process_evaluation(task_name: str, eval_request: EvalRequest) -> dict:
    # batch_size = 1
    batch_size = "auto"

    # might not have to get the benchmark.
    print(f"task_name parameter in process_evaluation() = {task_name}") #, task_names=[task.benchmark] = {[task.benchmark]}")

    num_fewshot = num_fewshots[task_name]

    results = run_evaluation(eval_request=eval_request, task_names=task_name, num_fewshot=num_fewshot,
                             batch_size=batch_size, device=DEVICE, use_cache=None, limit=LIMIT)

    print('RESULTS', results)

    dumped = json.dumps(results, indent=2, default=lambda o: '<not serializable>')
    print(dumped)

    output_path = os.path.join(EVAL_RESULTS_PATH_BACKEND, *eval_request.model.split("/"), f"results_{task_name}_{datetime.now()}.json")
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    with open(output_path, "w") as f:
        f.write(dumped)

    my_snapshot_download(repo_id=RESULTS_REPO, revision="main", local_dir=EVAL_RESULTS_PATH_BACKEND, repo_type="dataset", max_workers=60)
    API.upload_file(path_or_fileobj=output_path, path_in_repo=f"{eval_request.model}/results_{task_name}_{datetime.now()}.json",
                    repo_id=RESULTS_REPO, repo_type="dataset")
    return results


# the rendering is done with files in local repo.
def process_pending_requests() -> bool:
    sanity_checks()

    current_pending_status = [PENDING_STATUS]

    # Get all eval request that are PENDING, if you want to run other evals, change this parameter
    # GETTING REQUESTS FROM THE HUB NOT LOCAL DIR.
    eval_requests = get_eval_requests(job_status=current_pending_status, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND)
    # Sort the evals by priority (first submitted first run)
    eval_requests = sort_models_by_priority(api=API, models=eval_requests)

    random.shuffle(eval_requests)

    # this says zero
    print(f"Found {len(eval_requests)} {','.join(current_pending_status)} eval requests")

    if len(eval_requests) == 0:
        return False

    eval_request = eval_requests[0]
    pp.pprint(eval_request)

    my_snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60)
    my_set_eval_request(api=API, eval_request=eval_request, set_to_status=RUNNING_STATUS, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND)

    # task_lst = TASKS_HARNESS.copy()
    task_lst = eval_request.get_user_requested_task_names()
    random.shuffle(task_lst)
    print(f"task_lst in process_pending_requests(): {task_lst}")

    for task_name in task_lst:

        results = process_evaluation(task_name, eval_request)

    my_snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60)
    my_set_eval_request(api=API, eval_request=eval_request, set_to_status=FINISHED_STATUS, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND)

    return True


if __name__ == "__main__":
    # wait = True

    # import socket
    # if socket.gethostname() in {'hamburg'} or os.path.isdir("/home/pminervi"):
    #     wait = False

    # if wait:
    #     time.sleep(60 * random.randint(2, 5))
    #     pass

    # res = False
    res = process_pending_requests()

    # if res is False:
    #     res = process_finished_requests(100)

    # if res is False:
    #     res = process_finished_requests(0)
manage_repos.ipynb
DELETED
@@ -1,226 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"https://huggingface.co/datasets/chaeeunlee/test_requests\n",
"\n",
"https://huggingface.co/datasets/chaeeunlee/test_results"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CACHE_PATH = /Users/chaeeunlee/Documents/VSC_workspaces/huggingface_home_cache\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/chaeeunlee/anaconda3/envs/lb/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
" from .autonotebook import tqdm as notebook_tqdm\n"
]
},
{
"data": {
"text/plain": [
"'\\n( path_in_repo: str\\nrepo_id: str\\ntoken: typing.Optional[str] = None\\nrepo_type: typing.Optional[str] = Nonerevision: typing.Optional[str] = Nonecommit_message: typing.Optional[str] = Nonecommit_description: typing.Optional[str] = Nonecreate_pr: typing.Optional[bool] = Noneparent_commit: typing.Optional[str] = None )\\n'"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from src.envs import H4_TOKEN, API, QUEUE_REPO, RESULTS_REPO, REPO_ID\n",
"\n",
"from huggingface_hub import HfApi\n",
"\n",
"'''\n",
"( path_in_repo: str\n",
"repo_id: str\n",
"token: typing.Optional[str] = None\n",
"repo_type: typing.Optional[str] = Nonerevision: typing.Optional[str] = Nonecommit_message: typing.Optional[str] = Nonecommit_description: typing.Optional[str] = Nonecreate_pr: typing.Optional[bool] = Noneparent_commit: typing.Optional[str] = None )\n",
"'''\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"res = API.delete_folder(path_in_repo='hub/', repo_id=REPO_ID, repo_type='space')"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"ename": "EntryNotFoundError",
"evalue": "404 Client Error. (Request ID: Root=1-65c43c73-7771219478c3ca215705378d;6308513c-7fb2-4810-afa4-9ea734f21820)\n\nEntry Not Found for url: https://huggingface.co/api/datasets/chaeeunlee/test_results/commit/main.",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mHTTPError\u001b[0m Traceback (most recent call last)",
"File \u001b[0;32m~/anaconda3/envs/lb/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py:286\u001b[0m, in \u001b[0;36mhf_raise_for_status\u001b[0;34m(response, endpoint_name)\u001b[0m\n\u001b[1;32m 285\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 286\u001b[0m \u001b[43mresponse\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mraise_for_status\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 287\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m HTTPError \u001b[38;5;28;01mas\u001b[39;00m e:\n",
"File \u001b[0;32m~/anaconda3/envs/lb/lib/python3.10/site-packages/requests/models.py:1021\u001b[0m, in \u001b[0;36mResponse.raise_for_status\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1020\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m http_error_msg:\n\u001b[0;32m-> 1021\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m HTTPError(http_error_msg, response\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m)\n",
"\u001b[0;31mHTTPError\u001b[0m: 404 Client Error: Not Found for url: https://huggingface.co/api/datasets/chaeeunlee/test_results/commit/main",
"\nThe above exception was the direct cause of the following exception:\n",
"\u001b[0;31mEntryNotFoundError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[14], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m res \u001b[38;5;241m=\u001b[39m \u001b[43mAPI\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdelete_folder\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpath_in_repo\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mEleutherAI/\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrepo_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mRESULTS_REPO\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrepo_type\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mdataset\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/anaconda3/envs/lb/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py:118\u001b[0m, in \u001b[0;36mvalidate_hf_hub_args.<locals>._inner_fn\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m check_use_auth_token:\n\u001b[1;32m 116\u001b[0m kwargs \u001b[38;5;241m=\u001b[39m smoothly_deprecate_use_auth_token(fn_name\u001b[38;5;241m=\u001b[39mfn\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m, has_token\u001b[38;5;241m=\u001b[39mhas_token, kwargs\u001b[38;5;241m=\u001b[39mkwargs)\n\u001b[0;32m--> 118\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfn\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/anaconda3/envs/lb/lib/python3.10/site-packages/huggingface_hub/hf_api.py:4767\u001b[0m, in \u001b[0;36mHfApi.delete_folder\u001b[0;34m(self, path_in_repo, repo_id, token, repo_type, revision, commit_message, commit_description, create_pr, parent_commit)\u001b[0m\n\u001b[1;32m 4716\u001b[0m \u001b[38;5;129m@validate_hf_hub_args\u001b[39m\n\u001b[1;32m 4717\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mdelete_folder\u001b[39m(\n\u001b[1;32m 4718\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 4728\u001b[0m parent_commit: Optional[\u001b[38;5;28mstr\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 4729\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m CommitInfo:\n\u001b[1;32m 4730\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 4731\u001b[0m \u001b[38;5;124;03m Deletes a folder in the given repo.\u001b[39;00m\n\u001b[1;32m 4732\u001b[0m \n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 4765\u001b[0m \u001b[38;5;124;03m especially useful if the repo is updated / committed to concurrently.\u001b[39;00m\n\u001b[1;32m 4766\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m-> 4767\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcreate_commit\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 4768\u001b[0m \u001b[43m \u001b[49m\u001b[43mrepo_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrepo_id\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4769\u001b[0m \u001b[43m \u001b[49m\u001b[43mrepo_type\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrepo_type\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4770\u001b[0m \u001b[43m \u001b[49m\u001b[43mtoken\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtoken\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4771\u001b[0m \u001b[43m \u001b[49m\u001b[43moperations\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\u001b[43mCommitOperationDelete\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpath_in_repo\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpath_in_repo\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mis_folder\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4772\u001b[0m \u001b[43m \u001b[49m\u001b[43mrevision\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrevision\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4773\u001b[0m \u001b[43m \u001b[49m\u001b[43mcommit_message\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m 4774\u001b[0m \u001b[43m \u001b[49m\u001b[43mcommit_message\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mcommit_message\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mis\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mnot\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01melse\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;124;43mf\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mDelete folder \u001b[39;49m\u001b[38;5;132;43;01m{\u001b[39;49;00m\u001b[43mpath_in_repo\u001b[49m\u001b[38;5;132;43;01m}\u001b[39;49;00m\u001b[38;5;124;43m with huggingface_hub\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\n\u001b[1;32m 4775\u001b[0m \u001b[43m 
\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4776\u001b[0m \u001b[43m \u001b[49m\u001b[43mcommit_description\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcommit_description\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4777\u001b[0m \u001b[43m \u001b[49m\u001b[43mcreate_pr\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcreate_pr\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4778\u001b[0m \u001b[43m \u001b[49m\u001b[43mparent_commit\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mparent_commit\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4779\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/anaconda3/envs/lb/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py:118\u001b[0m, in \u001b[0;36mvalidate_hf_hub_args.<locals>._inner_fn\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m check_use_auth_token:\n\u001b[1;32m 116\u001b[0m kwargs \u001b[38;5;241m=\u001b[39m smoothly_deprecate_use_auth_token(fn_name\u001b[38;5;241m=\u001b[39mfn\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m, has_token\u001b[38;5;241m=\u001b[39mhas_token, kwargs\u001b[38;5;241m=\u001b[39mkwargs)\n\u001b[0;32m--> 118\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfn\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/anaconda3/envs/lb/lib/python3.10/site-packages/huggingface_hub/hf_api.py:1208\u001b[0m, in \u001b[0;36mfuture_compatible.<locals>._inner\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1205\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mrun_as_future(fn, \u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 1207\u001b[0m \u001b[38;5;66;03m# Otherwise, call the function normally\u001b[39;00m\n\u001b[0;32m-> 1208\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfn\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/anaconda3/envs/lb/lib/python3.10/site-packages/huggingface_hub/hf_api.py:3600\u001b[0m, in \u001b[0;36mHfApi.create_commit\u001b[0;34m(self, repo_id, operations, commit_message, commit_description, token, repo_type, revision, create_pr, num_threads, parent_commit, run_as_future)\u001b[0m\n\u001b[1;32m 3598\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 3599\u001b[0m commit_resp \u001b[38;5;241m=\u001b[39m get_session()\u001b[38;5;241m.\u001b[39mpost(url\u001b[38;5;241m=\u001b[39mcommit_url, headers\u001b[38;5;241m=\u001b[39mheaders, data\u001b[38;5;241m=\u001b[39mdata, params\u001b[38;5;241m=\u001b[39mparams)\n\u001b[0;32m-> 3600\u001b[0m \u001b[43mhf_raise_for_status\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcommit_resp\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mendpoint_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcommit\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3601\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m RepositoryNotFoundError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 3602\u001b[0m e\u001b[38;5;241m.\u001b[39mappend_to_message(_CREATE_COMMIT_NO_REPO_ERROR_MESSAGE)\n",
"File \u001b[0;32m~/anaconda3/envs/lb/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py:296\u001b[0m, in \u001b[0;36mhf_raise_for_status\u001b[0;34m(response, endpoint_name)\u001b[0m\n\u001b[1;32m 294\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m error_code \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mEntryNotFound\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[1;32m 295\u001b[0m message \u001b[38;5;241m=\u001b[39m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mresponse\u001b[38;5;241m.\u001b[39mstatus_code\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m Client Error.\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m+\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m+\u001b[39m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mEntry Not Found for url: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mresponse\u001b[38;5;241m.\u001b[39murl\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m--> 296\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m EntryNotFoundError(message, response) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01me\u001b[39;00m\n\u001b[1;32m 298\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m error_code \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mGatedRepo\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[1;32m 299\u001b[0m message \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 300\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mresponse\u001b[38;5;241m.\u001b[39mstatus_code\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m Client Error.\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m+\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m+\u001b[39m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCannot access gated repo for url \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mresponse\u001b[38;5;241m.\u001b[39murl\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 301\u001b[0m )\n",
"\u001b[0;31mEntryNotFoundError\u001b[0m: 404 Client Error. (Request ID: Root=1-65c43c73-7771219478c3ca215705378d;6308513c-7fb2-4810-afa4-9ea734f21820)\n\nEntry Not Found for url: https://huggingface.co/api/datasets/chaeeunlee/test_results/commit/main."
]
}
],
"source": [
"res = API.delete_folder(path_in_repo='EleutherAI/', repo_id=RESULTS_REPO, repo_type='dataset')"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
"res = API.delete_folder(path_in_repo='EleutherAI/pythia-70m_biolama_umls_eval_request_False_float32_Original.json', repo_id=QUEUE_REPO, repo_type='dataset')\n",
"# res = API.delete_folder(path_in_repo='mistralai/', repo_id=QUEUE_REPO, repo_type='dataset')\n",
"\n",
"# res = API.delete_file(path_in_repo=\"EleutherAI/pythia-70m_pubmedqa_eval_request_False_float32_Original.json\", repo_id=QUEUE_REPO, repo_type='dataset')\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# import os\n",
"\n",
"# for root, _, files in os.walk(results_path):\n",
"#     # We should only have json files in model results\n",
"#     if len(files) == 0 or any([not f.endswith(\".json\") for f in files]):\n",
"#         continue\n",
"\n",
"#     # Sort the files by date\n",
"#     try:\n",
"#         files.sort(key=lambda x: x.removesuffix(\".json\").removeprefix(\"results_\")[:-7])\n",
"#     except dateutil.parser._parser.ParserError:\n",
"#         files = [files[-1]]\n",
"\n",
"\n",
"#     print(f\"files = {files}\")\n",
"\n",
"#     for file in files:\n",
"#         model_result_filepaths.append(os.path.join(root, file))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"DatasetInfo(id='chaeeunlee/test_requests', author='chaeeunlee', sha='c7f4d0c0b1207cc773dcd0b1df49cd6a883e02be', created_at=datetime.datetime(2024, 1, 31, 11, 19, 22, tzinfo=datetime.timezone.utc), last_modified=datetime.datetime(2024, 1, 31, 19, 55, 30, tzinfo=datetime.timezone.utc), private=False, gated=False, disabled=False, downloads=0, likes=0, paperswithcode_id=None, tags=['license:mit', 'region:us'], card_data={'annotations_creators': None, 'language_creators': None, 'language': None, 'license': 'mit', 'multilinguality': None, 'size_categories': None, 'source_datasets': None, 'task_categories': None, 'task_ids': None, 'paperswithcode_id': None, 'pretty_name': None, 'config_names': None, 'train_eval_index': None}, siblings=[RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None), RepoSibling(rfilename='EleutherAI/pythia-160m_eval_request_False_float32_Original.json', size=None, blob_id=None, lfs=None), RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)])\n"
]
}
],
"source": [
"info = API.dataset_info(repo_id=QUEUE_REPO)\n",
"print(info)"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [],
"source": [
"from huggingface_hub import HfApi\n",
"\n",
"def print_repo_directory_structure(api, repo_id, is_dataset=True):\n",
"    \"\"\"\n",
"    Print the directory structure of a Hugging Face repository.\n",
"\n",
"    Parameters:\n",
"    - repo_id (str): Repository ID in the format \"username/reponame\".\n",
"    \"\"\"\n",
"    # api = HfApi()\n",
"    if is_dataset:\n",
"        repo_files = api.list_repo_files(repo_id=repo_id, repo_type='dataset')\n",
"    else:\n",
"        repo_files = api.list_repo_files(repo_id=repo_id)\n",
"\n",
"\n",
"    print(f\"Directory structure of {repo_id}:\")\n",
"    print()\n",
"    for file_path in repo_files:\n",
"        print(file_path)\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 35,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Directory structure of chaeeunlee/test_requests:\n",
"\n",
".gitattributes\n",
"README.md\n"
]
}
],
"source": [
"repo_id = QUEUE_REPO # Replace with the target repository ID\n",
"print_repo_directory_structure(API, repo_id)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "lb",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
pyproject.toml
ADDED
@@ -0,0 +1,13 @@
[tool.ruff]
# Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
select = ["E", "F"]
ignore = ["E501"] # line too long (black is taking care of this)
line-length = 119
fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]

[tool.isort]
profile = "black"
line_length = 119

[tool.black]
line-length = 119
requirements.txt
CHANGED
@@ -1,31 +1,15 @@
-[15 removed lines not rendered in the diff view]
-semantic-version
-tqdm
-transformers>=4.36.0,<4.37.0
-tokenizers>=0.15.0
-lm_eval @ git+https://github.com/EleutherAI/lm-evaluation-harness.git
-accelerate
-sentencepiece
-langdetect
-sacrebleu
-cchardet
-rouge_score
-bert-score
-evaluate
-spacy
-selfcheckgpt
-immutabledict
+APScheduler==3.10.1
+black==23.11.0
+click==8.1.3
+datasets==2.14.5
+gradio==4.4.0
+gradio_client==0.7.0
+huggingface-hub>=0.18.0
+matplotlib==3.7.1
+numpy==1.24.2
+pandas==2.0.0
+python-dateutil==2.8.2
+requests==2.28.2
+tqdm==4.65.0
+transformers==4.35.2
+tokenizers>=0.15.0
scripts/create_request_file.py
ADDED
@@ -0,0 +1,107 @@
+import json
+import os
+import pprint
+import re
+from datetime import datetime, timezone
+
+import click
+from colorama import Fore
+from huggingface_hub import HfApi, snapshot_download
+
+EVAL_REQUESTS_PATH = "eval-queue"
+QUEUE_REPO = "open-llm-leaderboard/requests"
+
+precisions = ("float16", "bfloat16", "8bit (LLM.int8)", "4bit (QLoRA / FP4)", "GPTQ")
+model_types = ("pretrained", "fine-tuned", "RL-tuned", "instruction-tuned")
+weight_types = ("Original", "Delta", "Adapter")
+
+
+def get_model_size(model_info, precision: str):
+    size_pattern = size_pattern = re.compile(r"(\d\.)?\d+(b|m)")
+    try:
+        model_size = round(model_info.safetensors["total"] / 1e9, 3)
+    except (AttributeError, TypeError):
+        try:
+            size_match = re.search(size_pattern, model_info.modelId.lower())
+            model_size = size_match.group(0)
+            model_size = round(float(model_size[:-1]) if model_size[-1] == "b" else float(model_size[:-1]) / 1e3, 3)
+        except AttributeError:
+            return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
+
+    size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
+    model_size = size_factor * model_size
+    return model_size
+
+
+def main():
+    api = HfApi()
+    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+    snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH, repo_type="dataset")
+
+    model_name = click.prompt("Enter model name")
+    revision = click.prompt("Enter revision", default="main")
+    precision = click.prompt("Enter precision", default="float16", type=click.Choice(precisions))
+    model_type = click.prompt("Enter model type", type=click.Choice(model_types))
+    weight_type = click.prompt("Enter weight type", default="Original", type=click.Choice(weight_types))
+    base_model = click.prompt("Enter base model", default="")
+    status = click.prompt("Enter status", default="FINISHED")
+
+    try:
+        model_info = api.model_info(repo_id=model_name, revision=revision)
+    except Exception as e:
+        print(f"{Fore.RED}Could not find model info for {model_name} on the Hub\n{e}{Fore.RESET}")
+        return 1
+
+    model_size = get_model_size(model_info=model_info, precision=precision)
+
+    try:
+        license = model_info.cardData["license"]
+    except Exception:
+        license = "?"
+
+    eval_entry = {
+        "model": model_name,
+        "base_model": base_model,
+        "revision": revision,
+        "private": False,
+        "precision": precision,
+        "weight_type": weight_type,
+        "status": status,
+        "submitted_time": current_time,
+        "model_type": model_type,
+        "likes": model_info.likes,
+        "params": model_size,
+        "license": license,
+    }
+
+    user_name = ""
+    model_path = model_name
+    if "/" in model_name:
+        user_name = model_name.split("/")[0]
+        model_path = model_name.split("/")[1]
+
+    pprint.pprint(eval_entry)
+
+    if click.confirm("Do you want to continue? This request file will be pushed to the hub"):
+        click.echo("continuing...")
+
+        out_dir = f"{EVAL_REQUESTS_PATH}/{user_name}"
+        os.makedirs(out_dir, exist_ok=True)
+        out_path = f"{out_dir}/{model_path}_eval_request_{False}_{precision}_{weight_type}.json"
+
+        with open(out_path, "w") as f:
+            f.write(json.dumps(eval_entry))
+
+        api.upload_file(
+            path_or_fileobj=out_path,
+            path_in_repo=out_path.split(f"{EVAL_REQUESTS_PATH}/")[1],
+            repo_id=QUEUE_REPO,
+            repo_type="dataset",
+            commit_message=f"Add {model_name} to eval queue",
+        )
+    else:
+        click.echo("aborting...")
+
+
+if __name__ == "__main__":
+    main()
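
For reference, the request file written by this script is a single JSON object with the keys built in `eval_entry` above. The sketch below shows that shape; the values are illustrative placeholders, not taken from any real submission.

```
# Illustrative only: the shape of the JSON that create_request_file.py writes.
# The repo id and values below are made up; the real script fills them from the Hub.
import json

example_entry = {
    "model": "some-org/some-model",  # hypothetical model id
    "base_model": "",
    "revision": "main",
    "private": False,
    "precision": "float16",
    "weight_type": "Original",
    "status": "FINISHED",
    "submitted_time": "2024-01-01T00:00:00Z",
    "model_type": "pretrained",
    "likes": 0,
    "params": 7.0,
    "license": "apache-2.0",
}

# File name pattern used by the script:
# <model>_eval_request_False_<precision>_<weight_type>.json
print(json.dumps(example_entry, indent=2))
```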
src/.DS_Store
DELETED
Binary file (6.15 kB)

src/backend/.DS_Store
DELETED
Binary file (6.15 kB)
src/backend/envs.py
DELETED
@@ -1,54 +0,0 @@
-import os
-
-import torch
-
-from dataclasses import dataclass
-from enum import Enum
-
-from src.envs import CACHE_PATH
-
-
-@dataclass
-class Task:
-    benchmark: str
-    # metric: str # yeah i don't think we need this.
-    col_name: str
-    num_fewshot: int
-
-
-class Tasks(Enum):
-
-    task0 = Task("medmcqa", "MedMCQA", 0)
-    task1 = Task("medqa_4options", "MedQA", 0)
-
-    task2 = Task("anatomy (mmlu)", "MMLU Anatomy", 0)
-    task3 = Task("clinical_knowledge (mmlu)", "MMLU Clinical Knowledge", 0)
-    task4 = Task("college_biology (mmlu)", "MMLU College Biology", 0)
-    task5 = Task("college_medicine (mmlu)", "MMLU College Medicine", 0)
-    task6 = Task("medical_genetics (mmlu)", "MMLU Medical Genetics", 0)
-    task7 = Task("professional_medicine (mmlu)", "MMLU Professional Medicine", 0)
-    task8 = Task("pubmedqa", "PubMedQA", 0)
-
-
-
-num_fewshots = {
-    "medmcqa": 0,
-    "medqa_4options": 0,
-    "anatomy (mmlu)": 0,
-    "clinical_knowledge (mmlu)": 0,
-    "college_biology (mmlu)": 0,
-    "college_medicine (mmlu)": 0,
-    "medical_genetics (mmlu)": 0,
-    "professional_medicine (mmlu)": 0,
-    "pubmedqa": 0,
-}
-
-
-# NUM_FEWSHOT = 64 # Change with your few shot
-
-EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
-EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
-
-DEVICE = "cuda" if torch.cuda.is_available() else 'mps'
-
-LIMIT = None  # Testing; needs to be None
src/backend/manage_requests.py
DELETED
@@ -1,140 +0,0 @@
-import glob
-import json
-from dataclasses import dataclass
-from typing import Optional, List
-
-from huggingface_hub import HfApi, snapshot_download
-
-from src.utils import my_snapshot_download
-
-from lm_eval import tasks, evaluator, utils
-
-from src.display.utils import Task
-
-
-@dataclass
-class EvalRequest:
-    model: str
-
-    ## added
-    requested_tasks: List[Task]  # dict?
-
-
-    private: bool
-    status: str
-    json_filepath: str
-    weight_type: str = "Original"
-    model_type: str = ""  # pretrained, finetuned, with RL
-    precision: str = ""  # float16, bfloat16
-    base_model: Optional[str] = None  # for adapter models
-    revision: str = "main"  # commit
-    submitted_time: Optional[str] = "2022-05-18T11:40:22.519222"  # random date just so that we can still order requests by date
-    model_type: Optional[str] = None
-    likes: Optional[int] = 0
-    params: Optional[int] = None
-    license: Optional[str] = ""
-
-    ## added by chaeeun
-    def get_user_requested_task_names(self) -> List[str]:
-        user_requested_tasks = self.requested_tasks
-        # print(f" {user_requested_tasks}")
-
-        task_names = [task['benchmark'] for task in user_requested_tasks]
-
-        return task_names
-
-
-    def get_model_args(self) -> str:
-
-        ## added
-        if "gpt" in self.model:
-            model_args = f"model={self.model},revision={self.revision},parallelize=True"
-        else:
-            model_args = f"pretrained={self.model},revision={self.revision},parallelize=True"
-
-        if self.precision in ["float16", "float32", "bfloat16"]:
-            model_args += f",dtype={self.precision}"
-        # Quantized models need some added config, the install of bits and bytes, etc
-        #elif self.precision == "8bit":
-        #    model_args += ",load_in_8bit=True"
-        #elif self.precision == "4bit":
-        #    model_args += ",load_in_4bit=True"
-        #elif self.precision == "GPTQ":
-            # A GPTQ model does not need dtype to be specified,
-            # it will be inferred from the config
-            pass
-        else:
-            raise Exception(f"Unknown precision {self.precision}.")
-
-        return model_args
-
-# set as in on remote repo!
-def set_eval_request(api: HfApi, eval_request: EvalRequest, set_to_status: str, hf_repo: str, local_dir: str):
-    """Updates a given eval request with its new status on the hub (running, completed, failed, ...)"""
-    json_filepath = eval_request.json_filepath
-
-    with open(json_filepath) as fp:
-        data = json.load(fp)
-
-    data["status"] = set_to_status
-
-    with open(json_filepath, "w") as f:
-        f.write(json.dumps(data))
-
-    api.upload_file(path_or_fileobj=json_filepath, path_in_repo=json_filepath.replace(local_dir, ""),
-                    repo_id=hf_repo, repo_type="dataset")
-
-# getting status from the remote repo as well.
-def get_eval_requests(job_status: list, local_dir: str, hf_repo: str) -> list[EvalRequest]:
-    """Get all pending evaluation requests and return a list in which private
-    models appearing first, followed by public models sorted by the number of
-    likes.
-
-    Returns:
-        `list[EvalRequest]`: a list of model info dicts.
-    """
-    my_snapshot_download(repo_id=hf_repo, revision="main", local_dir=local_dir, repo_type="dataset", max_workers=60)
-    json_files = glob.glob(f"{local_dir}/**/*.json", recursive=True)
-
-    eval_requests = []
-    for json_filepath in json_files:
-        with open(json_filepath) as fp:
-            data = json.load(fp)
-        if data["status"] in job_status:
-            # import pdb
-            # breakpoint()
-            data["json_filepath"] = json_filepath
-
-            if 'job_id' in data:
-                del data['job_id']
-
-            print(f"data in get_eval_requests(): {data}")
-
-            eval_request = EvalRequest(**data)
-            eval_requests.append(eval_request)
-
-    print(f"eval_requests right before returning: {eval_requests}")
-    return eval_requests
-
-# not entirely sure what this one does.
-def check_completed_evals(api: HfApi, hf_repo: str, local_dir: str, checked_status: str, completed_status: str,
-                          failed_status: str, hf_repo_results: str, local_dir_results: str):
-    """Checks if the currently running evals are completed, if yes, update their status on the hub."""
-    my_snapshot_download(repo_id=hf_repo_results, revision="main", local_dir=local_dir_results, repo_type="dataset", max_workers=60)
-
-    running_evals = get_eval_requests([checked_status], hf_repo=hf_repo, local_dir=local_dir)
-
-    for eval_request in running_evals:
-        model = eval_request.model
-        print("====================================")
-        print(f"Checking {model}")
-
-        output_path = model
-        output_file = f"{local_dir_results}/{output_path}/results*.json"
-        output_file_exists = len(glob.glob(output_file)) > 0
-
-        if output_file_exists:
-            print(f"EXISTS output file exists for {model} setting it to {completed_status}")
-            set_eval_request(api, eval_request, completed_status, hf_repo, local_dir)
-
-
src/backend/run_eval_suite.py
DELETED
@@ -1,75 +0,0 @@
-from lm_eval import tasks, evaluator, utils
-from lm_eval.tasks import initialize_tasks, TaskManager
-
-try:
-    from lm_eval.tasks import include_task_folder
-except:
-    from lm_eval.tasks import include_path
-
-from src.backend.manage_requests import EvalRequest
-
-# from src.backend.tasks.xsum.task import XSum
-# from src.backend.tasks.xsum.task_v2 import XSumv2
-
-# from src.backend.tasks.cnndm.task import CNNDM
-# from src.backend.tasks.cnndm.task_v2 import CNNDMv2
-
-# from src.backend.tasks.selfcheckgpt.task import SelfCheckGpt
-
-
-
-def run_evaluation(eval_request: EvalRequest, task_names, num_fewshot, batch_size, device, use_cache=None, limit=None, max_nb_samples=100) -> dict:
-    if limit:
-        print("WARNING: --limit SHOULD ONLY BE USED FOR TESTING. REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.")
-
-
-    # try:
-    #     include_task_folder("src/backend/tasks/")
-    # except:
-    #     include_path("src/backend/tasks")
-
-    # initialize_tasks('INFO')
-    # https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/interface.md#external-library-usage
-    # indexes all tasks from the `lm_eval/tasks` subdirectory.
-    # Alternatively, you can set `TaskManager(include_path="path/to/my/custom/task/configs")`
-    # to include a set of tasks in a separate directory.
-    task_manager = TaskManager(include_path="src/backend/probing_tasks")
-
-    if "gpt" in eval_request.model:
-        model = "openai-chat-completions"
-    else:
-        model = "hf-auto"
-
-    print(f"Considered Tasks (after overriding): {task_names}")
-
-    print(f"model_args: {eval_request.get_model_args()}")
-
-    results = evaluator.simple_evaluate(model=model, # "hf-causal-experimental", # "hf-causal" how can i make this work for
-                                        model_args=eval_request.get_model_args(),
-                                        task_manager=task_manager,
-                                        tasks=task_names,
-                                        num_fewshot=num_fewshot,
-                                        batch_size=batch_size,
-                                        max_batch_size=8,
-                                        device=device,
-                                        use_cache=use_cache,
-                                        limit=limit,
-
-                                        # task_manager=task_manager,
-                                        # include_path="/Users/chaeeunlee/Documents/VSC_workspaces/biomed_probing_leaderboard/src/backend/tasks",
-                                        write_out=True)
-
-    results["config"]["model_dtype"] = eval_request.precision
-    results["config"]["model_name"] = eval_request.model
-    results["config"]["model_sha"] = eval_request.revision
-
-    if max_nb_samples is not None:
-        if 'samples' in results:
-            samples = results['samples']
-            for task_name in samples.keys():
-                if len(samples[task_name]) > max_nb_samples:
-                    results['samples'][task_name] = results['samples'][task_name][:max_nb_samples]
-
-    # print(evaluator.make_table(results))
-
-    return results
src/backend/sort_queue.py
DELETED
@@ -1,28 +0,0 @@
-from dataclasses import dataclass
-from huggingface_hub import HfApi
-from src.backend.manage_requests import EvalRequest
-
-
-@dataclass
-class ModelMetadata:
-    likes: int = 0
-    size: int = 15
-
-
-def sort_models_by_priority(api: HfApi, models: list[EvalRequest]) -> list[EvalRequest]:
-    private_models = [model for model in models if model.private]
-    public_models = [model for model in models if not model.private]
-
-    return sort_by_submit_date(private_models) + sort_by_submit_date(public_models)
-
-
-def sort_by_submit_date(eval_requests: list[EvalRequest]) -> list[EvalRequest]:
-    return sorted(eval_requests, key=lambda x: x.submitted_time, reverse=False)
-
-
-def sort_by_size(eval_requests: list[EvalRequest]) -> list[EvalRequest]:
-    return sorted(eval_requests, key=lambda x: x.params, reverse=False)
-
-
-def sort_by_likes(eval_requests: list[EvalRequest]) -> list[EvalRequest]:
-    return sorted(eval_requests, key=lambda x: x.likes, reverse=False)
src/display/about.py
CHANGED
@@ -1,13 +1,46 @@
-from
+from dataclasses import dataclass
+from enum import Enum
+# from src.display.utils import ModelType
+
+
+@dataclass
+class Task:
+    benchmark: str
+    metric: str
+    col_name: str
+
+
+# Init: to update with your specific keys
+class Tasks(Enum):
+    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
+    task0 = Task("medmcqa", "acc,none", "MedMCQA")
+    task1 = Task("medqa_4options", "acc,none", "MedQA")
+    task2 = Task("mmlu_anatomy", "acc,none", "MMLU Anatomy")
+    task3 = Task("mmlu_clinical_knowledge", "acc,none", "MMLU Clinical Knowledge")
+    task4 = Task("mmlu_college_biology", "acc,none", "MMLU College Biology")
+    task5 = Task("mmlu_college_medicine", "acc,none", "MMLU College Medicine")
+    task6 = Task("mmlu_medical_genetics", "acc,none", "MMLU Medical Genetics")
+    task7 = Task("mmlu_professional_medicine", "acc,none", "MMLU Professional Medicine")
+    task8 = Task("pubmedqa", "acc,none", "PubMedQA")
+
+
+# "medmcqa", "acc,none", "MedMCQA"
+
+# Your leaderboard name
+# <h1 align="center" style="color: #1a237e;"> Open Medical-LLM Leaderboard</h1>
+TITLE = """
+
+
+<div style="text-align: center; margin-bottom: 20px;">
+    <img src="https://raw.githubusercontent.com/monk1337/MultiMedQA/main/assets/logs.png" alt="Descriptive Alt Text" style="display: block; margin: auto; height: 160px;">
+</div>
+
+<h1 align="center" style="color: #1a237e; font-size: 40px;">Open <span style="color: #990001;">Medical-LLM</span> Leaderboard</h1>
+

-# <div style="text-align: center;">
-# <img src="https://raw.githubusercontent.com/monk1337/MultiMedQA/main/assets/logs.png" alt="Descriptive Alt Text" style="display: block; margin: auto; height: 160px;">
-# </div>

-TITLE = """
-<h1 align="center" style="color: #1a237e;"> Open Medical-LLM Leaderboard</h1>
"""

+# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
🩺 The Open Medical LLM Leaderboard aims to track, rank and evaluate the performance of large language models (LLMs) on medical question answering tasks. It evaluates LLMs across a diverse array of medical datasets, including MedQA (USMLE), PubMedQA, MedMCQA, and subsets of MMLU related to medicine and biology. The leaderboard offers a comprehensive assessment of each model's medical knowledge and question answering capabilities.

@@ -20,11 +53,25 @@ The backend of the Open Medical LLM Leaderboard uses the Eleuther AI Language Mo

LLM_BENCHMARKS_TEXT = f"""

-
+<h2 style="color: #2c3e50;"> Why Leaderboard? </h2>
+
Evaluating the medical knowledge and clinical reasoning capabilities of LLMs is crucial as they are increasingly being applied to healthcare and biomedical applications. The Open Medical LLM Leaderboard provides a platform to assess the latest LLMs on their performance on a variety of medical question answering tasks. This can help identify the strengths and gaps in medical understanding of current models.

-How it works
+<h2 style="color: #2c3e50;">How it works</h2>
+
📈 We evaluate the models on 9 medical Q&A datasets using the <a href="https://github.com/EleutherAI/lm-evaluation-harness" target="_blank"> Eleuther AI Language Model Evaluation Harness </a>, a unified framework to test language models on different tasks.
+
+<h2 style="color: #2c3e50;">About Open Life Science AI</h2>
+An Open Life Science Project to Benchmark and Track AI Progress, Share Models and Datasets in the Life Science Field.
+<a href="https://openlifescience.ai/" target="_blank"> More info </a>
+
+
+<h2 style="color: #2c3e50;">Datasets</h2>
+
+<div style="font-family: Arial, sans-serif; line-height: 1.6; color: #333;"> <ul style="list-style-type: none; padding: 0;"> <li style="margin-bottom: 20px;"> <h3 style="color: #2c3e50; margin-bottom: 5px;"><a href="https://arxiv.org/abs/2009.13081" target="_blank" style="color: #3498db;">MedQA (USMLE)</a></h3> <p>1273 real-world questions from the US Medical License Exams (USMLE) to test general medical knowledge</p> </li> <li style="margin-bottom: 20px;"> <h3 style="color: #2c3e50; margin-bottom: 5px;"><a href="https://arxiv.org/abs/1909.06146" target="_blank" style="color: #3498db;">PubMedQA</a></h3> <p>500 questions constructed from PubMed article titles along with the abstracts as context to test understanding of biomedical research</p> </li> <li style="margin-bottom: 20px;"> <h3 style="color: #2c3e50; margin-bottom: 5px;"><a href="https://proceedings.mlr.press/v174/pal22a.html" target="_blank" style="color: #3498db;">MedMCQA</a></h3> <p>4183 questions from Indian medical entrance exams (AIIMS & NEET PG) spanning 2.4k healthcare topics</p> </li> <li style="margin-bottom: 20px;"> <h3 style="color: #2c3e50; margin-bottom: 5px;"><a href="https://arxiv.org/abs/2009.03300" target="_blank" style="color: #3498db;">MMLU-Clinical knowledge</a></h3> <p>265 multiple choice questions on clinical knowledge</p> </li> <li style="margin-bottom: 20px;"> <h3 style="color: #2c3e50; margin-bottom: 5px;"><a href="https://arxiv.org/abs/2009.03300" target="_blank" style="color: #3498db;">MMLU-Medical genetics</a></h3> <p>100 MCQs on medical genetics</p> </li> <li style="margin-bottom: 20px;"> <h3 style="color: #2c3e50; margin-bottom: 5px;"><a href="https://arxiv.org/abs/2009.03300" target="_blank" style="color: #3498db;">MMLU-Anatomy</a></h3> <p>135 anatomy MCQs</p> </li> <li style="margin-bottom: 20px;"> <h3 style="color: #2c3e50; margin-bottom: 5px;"><a href="https://arxiv.org/abs/2009.03300" target="_blank" style="color: #3498db;">MMLU-Professional medicine</a></h3> <p>272 MCQs on professional medicine</p> </li> <li style="margin-bottom: 20px;"> <h3 style="color: #2c3e50; margin-bottom: 5px;"><a href="https://arxiv.org/abs/2009.03300" target="_blank" style="color: #3498db;">MMLU-College biology</a></h3> <p>144 MCQs on college-level biology</p> </li> <li> <h3 style="color: #2c3e50; margin-bottom: 5px;"><a href="https://arxiv.org/abs/2009.03300" target="_blank" style="color: #3498db;">MMLU-College medicine</a></h3> <p>173 college medicine MCQs</p> </li> </ul> </div>
+
+<div style="font-family: Arial, sans-serif; line-height: 1.6; color: #333;"> <h2 style="color: #2c3e50;">Evaluation Metric</h2> <p>Metric Accuracy (ACC) is used as the main evaluation metric across all datasets.</p> <h2 style="color: #2c3e50;">Details and Logs</h2> <p>Detailed results are available in the results directory:</p> <a href="https://huggingface.co/datasets/openlifescienceai/results" target="_blank" style="color: #3498db;">https://huggingface.co/datasets/openlifescienceai/results</a> <p>Input/outputs for each model can be found in the details page accessible by clicking the 📄 emoji next to the model name.</p> <h2 style="color: #2c3e50;">Reproducibility</h2> <p>To reproduce the results, you can run this evaluation script:</p> <pre style="background-color: #f0f0f0; padding: 10px; border-radius: 5px;">python eval_medical_llm.py</pre> <p>To evaluate a specific dataset on a model, use the EleutherAI LLM Evaluation Harness:</p> <pre style="background-color: #f0f0f0; padding: 10px; border-radius: 5px;">python main.py --model=hf-auto --model_args="pretrained=<model>,revision=<revision>,parallelize=True" --tasks=<dataset> --num_fewshot=<n_shots> --batch_size=1 --output_path=<output_dir></pre> <p>Note some datasets may require additional setup, refer to the Evaluation Harness documentation.</p> <p>Adjust batch size based on your GPU memory if not using parallelism. Minor variations in results are expected with different batch sizes due to padding.</p> <h2 style="color: #2c3e50;">Icons</h2> <ul style="list-style-type: none; padding: 0;"> <li>🟢 Pre-trained model</li> <li>🔶 Fine-tuned model</li> <li>? Unknown model type</li> <li>⭕ Instruction-tuned</li> <li>🟦 RL-tuned</li> </ul> <p>Missing icons indicate the model info is not yet added, feel free to open an issue to include it!</p> </div>
+
"""

LLM_BENCHMARKS_DETAILS = f"""
@@ -55,9 +102,11 @@ python main.py --model=hf-auto --model_args="pretrained=<model>,revision=<revisi
Note some datasets may require additional setup, refer to the Evaluation Harness documentation. Adjust batch size based on your GPU memory if not using parallelism. Minor variations in results are expected with different batch sizes due to padding.

Icons
-
-
-
+🟢 Pre-trained model
+🔶 Fine-tuned model
+? Unknown model type
+⭕ instruction-tuned
+🟦 RL-tuned
Missing icons indicate the model info is not yet added, feel free to open an issue to include it!
"""

@@ -113,4 +162,15 @@ year = {2024},
publisher = {Hugging Face},
howpublished = "\url{https://huggingface.co/spaces/openlifescienceai/open_medical_llm_leaderboard}"
}
-
+
+
+
+@misc{singhal2023expertlevel,
+title={Towards Expert-Level Medical Question Answering with Large Language Models},
+author={Karan Singhal et al.},
+year={2023},
+eprint={2305.09617},
+archivePrefix={arXiv},
+primaryClass={cs.CL}
+}
+"""
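
The `Tasks` enum added here keys directly into the results JSON produced by the evaluation harness: `benchmark` is the key under `"results"` and `metric` ("acc,none") is the key inside each task entry, which is how `src/leaderboard/read_evals.py` looks the scores up. A minimal sketch of that mapping follows; the scores and model id are invented for illustration.

```
# Minimal sketch of a results file that the Tasks mapping above can read.
# The task and metric keys mirror Tasks; the scores and model name are made up.
results_json = {
    "config": {
        "model_dtype": "torch.float16",
        "model_name": "some-org/some-model",  # hypothetical
        "model_sha": "main",
    },
    "results": {
        "medmcqa": {"acc,none": 0.50},
        "pubmedqa": {"acc,none": 0.72},
    },
}

# Reading a score the way read_evals.py does: results[task.benchmark][task.metric]
score = results_json["results"]["medmcqa"]["acc,none"]
print(f"MedMCQA accuracy: {score:.2%}")
```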
src/display/css_html_js.py
CHANGED
@@ -1,9 +1,5 @@
custom_css = """

-.gradio-container {
-    max-width: 100%!important;
-}
-
.markdown-text {
    font-size: 16px !important;
}
src/display/formatting.py
CHANGED
@@ -7,18 +7,12 @@ from huggingface_hub.hf_api import ModelInfo

API = HfApi()

-
def model_hyperlink(link, model_name):
    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'


def make_clickable_model(model_name):
    link = f"https://huggingface.co/{model_name}"
-
-    # details_model_name = model_name.replace("/", "__")
-    # details_link = f"https://huggingface.co/datasets/open-llm-leaderboard/details_{details_model_name}"
-
-    # return model_hyperlink(link, model_name) + " " + model_hyperlink(details_link, "📑")
    return model_hyperlink(link, model_name)

src/display/utils.py
CHANGED
@@ -3,30 +3,12 @@ from enum import Enum

import pandas as pd

+from src.display.about import Tasks

def fields(raw_class):
    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]


-@dataclass
-class Task:
-    benchmark: str
-    metric: str
-    col_name: str
-
-
-class Tasks(Enum):
-    medmcqa = Task("medmcqa", "acc", "MedMCQA")
-    medqa = Task("medqa_4options", "acc", "MedQA")
-
-    mmlu_anatomy = Task("anatomy (mmlu)", "acc", "MMLU Anatomy")
-    mmlu_ck = Task("clinical_knowledge (mmlu)", "acc", "MMLU Clinical Knowledge")
-    mmlu_cb = Task("college_biology (mmlu)", "acc", "MMLU College Biology")
-    mmlu_cm = Task("college_medicine (mmlu)", "acc", "MMLU College Medicine")
-    mmlu_mg = Task("medical_genetics (mmlu)", "acc", "MMLU Medical Genetics")
-    mmlu_pm = Task("professional_medicine (mmlu)", "acc", "MMLU Professional Medicine")
-    pubmedqa = Task("pubmedqa", "acc", "PubMedQA")
-
# These classes are for user facing column names,
# to avoid having to change them all around the code
# when a modif is needed
@@ -38,16 +20,16 @@ class ColumnContent:
    hidden: bool = False
    never_hidden: bool = False
    dummy: bool = False
-    is_task: bool = False

+## Leaderboard columns
auto_eval_column_dict = []
# Init
auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
#Scores
-auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("
+auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
for task in Tasks:
-    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True
+    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
# Model information
auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
@@ -64,7 +46,7 @@ auto_eval_column_dict.append(["dummy", ColumnContent, ColumnContent("model_name_
# We use make dataclass to dynamically fill the scores from Tasks
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

-
+## For the queue columns in the submission tab
@dataclass(frozen=True)
class EvalQueueColumn:  # Queue column
    model = ColumnContent("model", "markdown", True)
@@ -74,11 +56,12 @@ class EvalQueueColumn:  # Queue column
    weight_type = ColumnContent("weight_type", "str", "Original")
    status = ColumnContent("status", "str", True)

-
+## All the model information that we might need
@dataclass
class ModelDetails:
    name: str
-
+    display_name: str = ""
+    symbol: str = ""  # emoji


class ModelType(Enum):
@@ -103,18 +86,12 @@ class ModelType(Enum):
        return ModelType.IFT
    return ModelType.Unknown

-
class WeightType(Enum):
    Adapter = ModelDetails("Adapter")
    Original = ModelDetails("Original")
    Delta = ModelDetails("Delta")

-
-
-
-
class Precision(Enum):
-    float32 = ModelDetails("float32")
    float16 = ModelDetails("float16")
    bfloat16 = ModelDetails("bfloat16")
    qt_8bit = ModelDetails("8bit")
@@ -122,10 +99,7 @@ class Precision(Enum):
    qt_GPTQ = ModelDetails("GPTQ")
    Unknown = ModelDetails("?")

-
-    def from_str(precision: str):
-        if precision in ["torch.float32", "float32"]:
-            return Precision.float32
+    def from_str(precision):
        if precision in ["torch.float16", "float16"]:
            return Precision.float16
        if precision in ["torch.bfloat16", "bfloat16"]:
@@ -137,7 +111,6 @@ class Precision(Enum):
        if precision in ["GPTQ", "None"]:
            return Precision.qt_GPTQ
        return Precision.Unknown
-

# Column selection
COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
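
Because `AutoEvalColumn` is generated at import time with `make_dataclass`, the `fields()` helper is how the rest of the code enumerates its columns. The standalone sketch below reproduces that pattern with simplified stand-in columns (not the real class definitions) so the `COLS` selection above is easier to follow.

```
# Same pattern as utils.py, with simplified stand-in columns for illustration.
from dataclasses import dataclass, make_dataclass


@dataclass(frozen=True)
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False


def fields(raw_class):
    # Collect the ColumnContent defaults attached to the generated class
    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]


cols = [
    ["model", ColumnContent, ColumnContent("Model", "markdown", True)],
    ["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)],
]
DemoColumn = make_dataclass("DemoColumn", cols, frozen=True)

# Column selection works the same way as COLS in utils.py
print([c.name for c in fields(DemoColumn) if not c.hidden])  # ['Model', 'Average ⬆️']
```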
src/envs.py
CHANGED
@@ -2,38 +2,19 @@ import os

from huggingface_hub import HfApi

+# clone / pull the lmeh eval data
+TOKEN = os.environ.get("TOKEN", None)

-
+OWNER = "openlifescienceai"
+REPO_ID = f"{OWNER}/open_medical_llm_leaderboard"
+QUEUE_REPO = f"{OWNER}/requests"
+RESULTS_REPO = f"{OWNER}/results"

-
-
-
-QUEUE_REPO = "openlifescienceai/test_requests"
-RESULTS_REPO = "openlifescienceai/test_results"
-
-# have not created these repos yet
-PRIVATE_QUEUE_REPO = "openlifescienceai/test_private-requests"
-PRIVATE_RESULTS_REPO = "openlifescienceai/test_private-results"
-
-IS_PUBLIC = bool(os.environ.get("IS_PUBLIC", True))
-
-# CACHE_PATH = "/Users/chaeeunlee/Documents/VSC_workspaces/test_leaderboard" #
-CACHE_PATH = os.getenv("HF_HOME", ".")
-
-print(f"CACHE_PATH = {CACHE_PATH}")
+CACHE_PATH=os.getenv("HF_HOME", ".")
+# print("CACHE_PATH", CACHE_PATH)

+# Local caches
EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")

-
-EVAL_RESULTS_PATH_PRIVATE = "eval-results-private"
-
-# PATH_TO_COLLECTION = "hallucinations-leaderboard/llm-leaderboard-best-models-652d6c7965a4619fb5c27a03" # ??
-
-# Rate limit variables
-RATE_LIMIT_PERIOD = 7
-RATE_LIMIT_QUOTA = 5
-HAS_HIGHER_RATE_LIMIT = ["TheBloke"]
-
-API = HfApi(token=H4_TOKEN)
-# API = HfApi()
+API = HfApi(token=TOKEN)
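
With the new layout, the app only needs `QUEUE_REPO`/`RESULTS_REPO` plus the two local cache paths. A sketch of the sync step is below; the `snapshot_download` call mirrors the one in `scripts/create_request_file.py`, and whether `app.py` wraps it in any retry logic is not shown here.

```
# Sketch: pull the request queue and results datasets into the local caches
# defined in src/envs.py. Assumes TOKEN grants read access to both repos.
from huggingface_hub import snapshot_download

from src.envs import EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, RESULTS_REPO, TOKEN

for repo_id, local_dir in [(QUEUE_REPO, EVAL_REQUESTS_PATH), (RESULTS_REPO, EVAL_RESULTS_PATH)]:
    snapshot_download(
        repo_id=repo_id,
        local_dir=local_dir,
        repo_type="dataset",
        token=TOKEN,
    )
```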
src/leaderboard/filter_models.py
DELETED
@@ -1,50 +0,0 @@
-from src.display.formatting import model_hyperlink
-from src.display.utils import AutoEvalColumn
-
-# Models which have been flagged by users as being problematic for a reason or another
-# (Model name to forum discussion link)
-FLAGGED_MODELS = {
-    "Voicelab/trurl-2-13b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/202",
-    "deepnight-research/llama-2-70B-inst": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/207",
-    "Aspik101/trurl-2-13b-pl-instruct_unload": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/213",
-    "Fredithefish/ReasonixPajama-3B-HF": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/236",
-    "TigerResearch/tigerbot-7b-sft-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/237",
-    "gaodrew/gaodrew-gorgonzola-13b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/215",
-    "AIDC-ai-business/Marcoroni-70B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287",
-    "AIDC-ai-business/Marcoroni-13B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287",
-    "AIDC-ai-business/Marcoroni-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287",
-}
-
-# Models which have been requested by orgs to not be submitted on the leaderboard
-DO_NOT_SUBMIT_MODELS = [
-    "Voicelab/trurl-2-13b",  # trained on MMLU
-]
-
-
-def flag_models(leaderboard_data: list[dict]):
-    for model_data in leaderboard_data:
-        if model_data["model_name_for_query"] in FLAGGED_MODELS:
-            issue_num = FLAGGED_MODELS[model_data["model_name_for_query"]].split("/")[-1]
-            issue_link = model_hyperlink(
-                FLAGGED_MODELS[model_data["model_name_for_query"]],
-                f"See discussion #{issue_num}",
-            )
-            model_data[
-                AutoEvalColumn.model.name
-            ] = f"{model_data[AutoEvalColumn.model.name]} has been flagged! {issue_link}"
-
-
-def remove_forbidden_models(leaderboard_data: list[dict]):
-    indices_to_remove = []
-    for ix, model in enumerate(leaderboard_data):
-        if model["model_name_for_query"] in DO_NOT_SUBMIT_MODELS:
-            indices_to_remove.append(ix)
-
-    for ix in reversed(indices_to_remove):
-        leaderboard_data.pop(ix)
-    return leaderboard_data
-
-
-def filter_models(leaderboard_data: list[dict]):
-    leaderboard_data = remove_forbidden_models(leaderboard_data)
-    flag_models(leaderboard_data)
src/leaderboard/read_evals.py
CHANGED
@@ -1,5 +1,6 @@
import glob
import json
+import math
import os
from dataclasses import dataclass

@@ -13,7 +14,6 @@ from src.submission.check_validity import is_model_on_hub

@dataclass
class EvalResult:
-    # Also see src.display.utils.AutoEvalColumn for what will be displayed.
    eval_name: str  # org_model_precision (uid)
    full_model: str  # org/model (path on hub)
    org: str
@@ -23,21 +23,20 @@ class EvalResult:
    precision: Precision = Precision.Unknown
    model_type: ModelType = ModelType.Unknown  # Pretrained, fine tuned, ...
    weight_type: WeightType = WeightType.Original  # Original or Adapter
-    architecture: str = "Unknown"
+    architecture: str = "Unknown"
    license: str = "?"
    likes: int = 0
    num_params: int = 0
    date: str = ""  # submission date of request file
    still_on_hub: bool = False

-    @
-    def init_from_json_file(
+    @classmethod
+    def init_from_json_file(self, json_filepath):
        """Inits the result from the specific model result file"""
        with open(json_filepath) as fp:
            data = json.load(fp)

-
-        config = data.get("config", data.get("config_general", None))
+        config = data.get("config")

        # Precision
        precision = Precision.from_str(config.get("model_dtype"))
@@ -56,7 +55,9 @@ class EvalResult:
        result_key = f"{org}_{model}_{precision.value.name}"
        full_model = "/".join(org_and_model)

-        still_on_hub,
+        still_on_hub, _, model_config = is_model_on_hub(
+            full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
+        )
        architecture = "?"
        if model_config is not None:
            architectures = getattr(model_config, "architectures", None)
@@ -65,47 +66,28 @@ class EvalResult:

        # Extract results available in this file (some results are split in several files)
        results = {}
-
-        task_iterator = Tasks
-        if is_backend is True:
-            from src.backend.envs import Tasks as BackendTasks
-            task_iterator = BackendTasks
-
-        for task in task_iterator:
+        for task in Tasks:
            task = task.value

-
-            res_copy = results.copy()
-
-            for task_name in res_copy.keys():
-                entry_copy = results[task_name].copy()
-
-                for k, v in entry_copy.items():
-                    if "exact_match" in k:
-                        results[task_name][k.replace("exact_match", "em")] = v
-
-                entry_copy = results[task_name].copy()
-
-                for k, v in entry_copy.items():
-                    if "," in k:
-                        tokens = k.split(",")
-                        results[task_name][tokens[0]] = v
-
-                return results
-
-            accs = np.array([v.get(task.metric, None) for k, v in post_process_results(data["results"]).items() if task.benchmark in k])
-
+            # We average all scores of a given metric (not all metrics are present in all files)
+            accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
            if accs.size == 0 or any([acc is None for acc in accs]):
                continue

            mean_acc = np.mean(accs) * 100.0
-            mean_acc = round(mean_acc, 2)
            results[task.benchmark] = mean_acc

-        return
-
+        return self(
+            eval_name=result_key,
+            full_model=full_model,
+            org=org,
+            model=model,
+            results=results,
+            precision=precision,
+            revision= config.get("model_sha", ""),
+            still_on_hub=still_on_hub,
+            architecture=architecture
+        )

    def update_with_request_file(self, requests_path):
        """Finds the relevant request file for the current model and updates info with it"""
@@ -120,19 +102,12 @@ class EvalResult:
            self.likes = request.get("likes", 0)
            self.num_params = request.get("params", 0)
            self.date = request.get("submitted_time", "")
-        except Exception
-            print(f"Could not find request file for {self.org}/{self.model}
-
-    def is_complete(self) -> bool:
-        for task in Tasks:
-            if task.value.benchmark not in self.results:
-                return False
-        return True
+        except Exception:
+            print(f"Could not find request file for {self.org}/{self.model}")

    def to_dict(self):
        """Converts the Eval Result to a dict compatible with our dataframe display"""
        average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
-        average = round(average, 2)
        data_dict = {
            "eval_name": self.eval_name,  # not a column, just a save name,
            AutoEvalColumn.precision.name: self.precision.value.name,
@@ -151,48 +126,42 @@ class EvalResult:
        }

        for task in Tasks:
-
-            data_dict[task.value.col_name] = self.results[task.value.benchmark]
+            data_dict[task.value.col_name] = self.results[task.value.benchmark]

        return data_dict


def get_request_file_for_model(requests_path, model_name, precision):
-    """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED
+    """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
    request_files = os.path.join(
        requests_path,
        f"{model_name}_eval_request_*.json",
    )
    request_files = glob.glob(request_files)

+    print("yahaa", request_files)
+
    # Select correct request file (precision)
    request_file = ""
    request_files = sorted(request_files, reverse=True)
-    # print('XXX', request_files)
    for tmp_request_file in request_files:
        with open(tmp_request_file, "r") as f:
            req_content = json.load(f)
            if (
-
-                req_content["precision"] == precision.split(".")[-1]
+                req_content["status"] in ["FINISHED"]
+                and req_content["precision"] == precision.split(".")[-1]
            ):
                request_file = tmp_request_file
    return request_file


-def get_raw_eval_results(results_path: str, requests_path: str
+def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
    """From the path of the results folder root, extract all needed info for results"""
    model_result_filepaths = []

-    print(f"results_path: {results_path}")
-
-    walked_list = list(os.walk(results_path))
-    print(f"len(walked_list): {len(walked_list)}") # 1
-
    for root, _, files in os.walk(results_path):
        # We should only have json files in model results
        if len(files) == 0 or any([not f.endswith(".json") for f in files]):
-            print("negative condition met")
            continue

        # Sort the files by date
@@ -201,16 +170,13 @@ def get_raw_eval_results(results_path: str, requests_path: str, is_backend: bool
        except dateutil.parser._parser.ParserError:
            files = [files[-1]]

-
-        print(f"files = {files}")
-
        for file in files:
            model_result_filepaths.append(os.path.join(root, file))

    eval_results = {}
    for model_result_filepath in model_result_filepaths:
        # Creation of result
-        eval_result = EvalResult.init_from_json_file(model_result_filepath
+        eval_result = EvalResult.init_from_json_file(model_result_filepath)
        eval_result.update_with_request_file(requests_path)

        # Store results of same eval together
@@ -222,7 +188,10 @@ def get_raw_eval_results(results_path: str, requests_path: str, is_backend: bool

    results = []
    for v in eval_results.values():
-
+        try:
+            v.to_dict()  # we test if the dict version is complete
+            results.append(v)
+        except KeyError:  # not all eval values present
+            continue

-    print(f"results = {results}")
    return results
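
Per-task scores land in `EvalResult.results` as percentages, and `to_dict` averages them over `len(Tasks)`, so a model missing any benchmark raises `KeyError` and is skipped in `get_raw_eval_results`. A standalone sketch of that averaging, using plain dicts instead of the real classes and invented scores:

```
# Standalone sketch of the averaging done in EvalResult: per-task accuracies are
# stored as percentages, and the leaderboard average divides by the number of
# tasks, so a model missing any benchmark is dropped rather than averaged.
import numpy as np

TASKS = {"medmcqa": "acc,none", "pubmedqa": "acc,none"}  # subset for illustration

raw_results = {  # invented scores, same shape as data["results"]
    "medmcqa": {"acc,none": 0.50},
    "pubmedqa": {"acc,none": 0.72},
}

per_task = {}
for benchmark, metric in TASKS.items():
    accs = np.array([v.get(metric) for k, v in raw_results.items() if k == benchmark])
    if accs.size == 0 or any(acc is None for acc in accs):
        continue
    per_task[benchmark] = np.mean(accs) * 100.0

average = sum(per_task.values()) / len(TASKS)
print(per_task, round(average, 2))
```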
src/populate.py
CHANGED
@@ -5,56 +5,23 @@ import pandas as pd
 
 from src.display.formatting import has_no_nan_values, make_clickable_model
 from src.display.utils import AutoEvalColumn, EvalQueueColumn
-from src.leaderboard.filter_models import filter_models
-from src.leaderboard.read_evals import get_raw_eval_results, EvalResult
+from src.leaderboard.read_evals import get_raw_eval_results
 
-'''
-This function, get_leaderboard_df, is designed to read and process evaluation results from a specified results path and requests path,
-ultimately producing a leaderboard in the form of a pandas DataFrame. The process involves several steps, including filtering, sorting,
-and cleaning the data based on specific criteria. Let's break down the function step by step:
-
-'''
-
-## TO-DO: if raw_data is [], return dummy df with correct columns so that the UI shows the right columns
-def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> tuple[list[EvalResult], pd.DataFrame]:
-
-    print(f"results_path = {results_path}")
 
+def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
     raw_data = get_raw_eval_results(results_path, requests_path)
-
-    all_data_json = [v.to_dict() for v in raw_data]  # if v.is_complete()]
-    # all_data_json.append(baseline_row)
-    filter_models(all_data_json)
-
-    print(f"all_data_json = {all_data_json}")
+    all_data_json = [v.to_dict() for v in raw_data]
 
     df = pd.DataFrame.from_records(all_data_json)
+    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
+    df = df[cols].round(decimals=2)
 
-
-    # Iterate over all attributes of AutoEvalColumn class
-    for attr_name in dir(AutoEvalColumn):
-        # Retrieve the attribute object
-        attr = getattr(AutoEvalColumn, attr_name)
-        # Check if the attribute has 'is_task' attribute and it is True
-        if hasattr(attr, 'is_task') and getattr(attr, 'is_task'):
-            task_attributes.append(attr)
-
-    # Now task_attributes contains all attributes where is_task=True
-    # print(task_attributes)
-    task_col_names_all = [str(item.name) for item in task_attributes]
-
-    # import pdb; pdb.set_trace()
-
-    # Add empty columns with specified names
-    for col_name in task_col_names_all:
-        if col_name not in df.columns:
-            df[col_name] = None
-
+    # filter out if any of the benchmarks have not been produced
+    df = df[has_no_nan_values(df, benchmark_cols)]
     return raw_data, df
 
 
-def get_evaluation_queue_df(save_path: str, cols: list) ->
+def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
     entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
     all_evals = []
 
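The new `get_leaderboard_df` body is a straight pandas pipeline: build records, sort by the average column, round, then drop rows with missing benchmark scores. A toy illustration of the same steps; the column names here are made up for the example, not the real `AutoEvalColumn` names:

```python
import pandas as pd

# Stand-ins for all_data_json; "Average" and the task columns are hypothetical names.
records = [
    {"Model": "org/model-a", "Average": 71.236, "Task1": 70.004, "Task2": 72.468},
    {"Model": "org/model-b", "Average": 65.5, "Task1": None, "Task2": 65.5},
]
benchmark_cols = ["Task1", "Task2"]

df = pd.DataFrame.from_records(records)
df = df.sort_values(by=["Average"], ascending=False)
df = df.round(decimals=2)

# Rough equivalent of has_no_nan_values: keep only rows with every benchmark filled in.
df = df[df[benchmark_cols].notna().all(axis=1)]
print(df)
```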
src/submission/check_validity.py
CHANGED
@@ -7,17 +7,11 @@ from datetime import datetime, timedelta, timezone
 import huggingface_hub
 from huggingface_hub import ModelCard
 from huggingface_hub.hf_api import ModelInfo
-
-from transformers import AutoConfig, AutoTokenizer
+from transformers import AutoConfig
 from transformers.models.auto.tokenization_auto import tokenizer_class_from_name, get_tokenizer_config
 
-from src.envs import HAS_HIGHER_RATE_LIMIT
-
-
-# ht to @Wauplin, thank you for the snippet!
-# See https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/317
 def check_model_card(repo_id: str) -> tuple[bool, str]:
-
+    """Checks if the model card and license exist and have been filled"""
     try:
         card = ModelCard.load(repo_id)
     except huggingface_hub.utils.EntryNotFoundError:
@@ -38,58 +32,28 @@ def check_model_card(repo_id: str) -> tuple[bool, str]:
     return True, ""
 
 
-
-
-    # config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
-    # if test_tokenizer:
-    #     tokenizer_config = get_tokenizer_config(model_name)
-
-    #     if tokenizer_config is not None:
-    #         tokenizer_class_candidate = tokenizer_config.get("tokenizer_class", None)
-    #     else:
-    #         tokenizer_class_candidate = config.tokenizer_class
-
-    #     tokenizer_class = None
-    #     if tokenizer_class_candidate is not None:
-    #         tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
-
-    #     if tokenizer_class is None:
-    #         return (
-    #             False,
-    #             f"uses {tokenizer_class_candidate}, which is not in a transformers release, therefore not supported at the moment.",  # pythia-160m throws this error. seems unnecessary.
-    #             None
-    #         )
-    # return True, None, config
-
-    # except ValueError:
-    #     return (
-    #         False,
-    #         "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
-    #         None
-    #     )
-
-    # except Exception as e:
-    #     print('XXX', e)
-    #     return False, "was not found on hub!", None
-
-# replaced with https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/blob/main/src/submission/check_validity.py
-def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str, AutoConfig]:
+def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
+    """Makes sure the model is on the hub, and uses a valid configuration (in the latest transformers version)"""
     try:
-        config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
+        config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
         if test_tokenizer:
-
-
-
+            tokenizer_config = get_tokenizer_config(model_name)
+            if tokenizer_config is not None:
+                tokenizer_class_candidate = tokenizer_config.get("tokenizer_class", None)
+            else:
+                tokenizer_class_candidate = config.tokenizer_class
+
+            tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
+            if tokenizer_class is None:
                 return (
                     False,
-                    f"uses
+                    f"uses {tokenizer_class_candidate}, which is not in a transformers release, therefore not supported at the moment.",
                     None
                 )
-    except Exception as e:
-        return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
         return True, None, config
 
-    except ValueError
+    except ValueError:
         return (
             False,
             "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
@@ -99,77 +63,25 @@ def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_rem
     except Exception as e:
         return False, "was not found on hub!", None
 
+
 def get_model_size(model_info: ModelInfo, precision: str):
-
+    """Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
    try:
         model_size = round(model_info.safetensors["total"] / 1e9, 3)
-    except (AttributeError, TypeError
-
-        size_match = re.search(size_pattern, model_info.modelId.lower())
-        model_size = size_match.group(0)
-        model_size = round(float(model_size[:-1]) if model_size[-1] == "b" else float(model_size[:-1]) / 1e3, 3)
-    except AttributeError:
-        return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
+    except (AttributeError, TypeError):
+        return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
 
     size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
     model_size = size_factor * model_size
     return model_size
 
 def get_model_arch(model_info: ModelInfo):
+    """Gets the model architecture from the configuration"""
     return model_info.config.get("architectures", "Unknown")
 
-def user_submission_permission(org_or_user, users_to_submission_dates, rate_limit_period, rate_limit_quota):
-    if org_or_user not in users_to_submission_dates:
-        return True, ""
-    submission_dates = sorted(users_to_submission_dates[org_or_user])
-
-    time_limit = (datetime.now(timezone.utc) - timedelta(days=rate_limit_period)).strftime("%Y-%m-%dT%H:%M:%SZ")
-    submissions_after_timelimit = [d for d in submission_dates if d > time_limit]
-
-    num_models_submitted_in_period = len(submissions_after_timelimit)
-    if org_or_user in HAS_HIGHER_RATE_LIMIT:
-        rate_limit_quota = 2 * rate_limit_quota
-
-    if num_models_submitted_in_period > rate_limit_quota:
-        error_msg = f"Organisation or user `{org_or_user}`"
-        error_msg += f"already has {num_models_submitted_in_period} model requests submitted to the leaderboard "
-        error_msg += f"in the last {rate_limit_period} days.\n"
-        error_msg += (
-            "Please wait a couple of days before resubmitting, so that everybody can enjoy using the leaderboard 🤗"
-        )
-        return False, error_msg
-    return True, ""
-
-
-# # already_submitted_models(EVAL_REQUESTS_PATH) os.path.join(CACHE_PATH, "eval-queue")
-# # REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
-# # debug: current code doesn't allow submission of the same model for a different task.
-# def already_submitted_models(requested_models_dir: str) -> set[str]:
-#     depth = 1
-#     file_names = []
-#     users_to_submission_dates = defaultdict(list)
-
-#     for root, _, files in os.walk(requested_models_dir):
-#         current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
-#         if current_depth == depth:
-#             for file in files:
-#                 if not file.endswith(".json"):
-#                     continue
-#                 with open(os.path.join(root, file), "r") as f:
-#                     info = json.load(f)
-#                     file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
-
-#                 # Select organisation
-#                 if info["model"].count("/") == 0 or "submitted_time" not in info:
-#                     continue
-#                 organisation, _ = info["model"].split("/")
-#                 users_to_submission_dates[organisation].append(info["submitted_time"])  # why is this useful?
-
-#     return set(file_names), users_to_submission_dates
-
 def already_submitted_models(requested_models_dir: str) -> set[str]:
     depth = 1
-    file_names = []
+    file_names = []
     users_to_submission_dates = defaultdict(list)
 
     for root, _, files in os.walk(requested_models_dir):
@@ -180,15 +92,12 @@ def already_submitted_models(requested_models_dir: str) -> set[str]:
                     continue
                 with open(os.path.join(root, file), "r") as f:
                     info = json.load(f)
-
-                    for requested_task in requested_tasks:
-
-                        file_names.append(f"{info['model']}_{requested_task}_{info['revision']}_{info['precision']}")
+                    file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
 
-
-
-
-
-
+                # Select organisation
+                if info["model"].count("/") == 0 or "submitted_time" not in info:
+                    continue
+                organisation, _ = info["model"].split("/")
+                users_to_submission_dates[organisation].append(info["submitted_time"])
 
     return set(file_names), users_to_submission_dates
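The rewritten `is_model_on_hub` boils down to two checks: can transformers resolve a config for the repo, and, if `test_tokenizer` is set, does the advertised tokenizer class exist in the installed transformers release. A rough standalone sketch of the same idea (the helper name and the example model are only placeholders):

```python
from transformers import AutoConfig
from transformers.models.auto.tokenization_auto import get_tokenizer_config, tokenizer_class_from_name


def hub_config_and_tokenizer_ok(model_name: str, revision: str = "main") -> bool:
    """True if a config resolves and the declared tokenizer class is known to transformers."""
    try:
        config = AutoConfig.from_pretrained(model_name, revision=revision)
    except Exception:
        return False

    tokenizer_config = get_tokenizer_config(model_name)
    candidate = (tokenizer_config or {}).get("tokenizer_class") or config.tokenizer_class
    # If nothing declares a tokenizer class, treat it as acceptable here (a simplification).
    return candidate is None or tokenizer_class_from_name(candidate) is not None


# Example (network access required):
# print(hub_config_and_tokenizer_ok("gpt2"))
```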
src/submission/submit.py
CHANGED
@@ -3,32 +3,22 @@ import os
 from datetime import datetime, timezone
 
 from src.display.formatting import styled_error, styled_message, styled_warning
-from src.envs import API, EVAL_REQUESTS_PATH,
-from src.leaderboard.filter_models import DO_NOT_SUBMIT_MODELS
+from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
 from src.submission.check_validity import (
     already_submitted_models,
     check_model_card,
     get_model_size,
     is_model_on_hub,
-    user_submission_permission,
 )
 
-## it just uploads request file. where does the evaluation actually happen?
-
 REQUESTED_MODELS = None
 USERS_TO_SUBMISSION_DATES = None
 
-
 def add_new_eval(
     model: str,
-
-    requested_tasks: list,  # write better type hints. this is list of class Task.
-
-
     base_model: str,
     revision: str,
     precision: str,
-    private: bool,
     weight_type: str,
     model_type: str,
 ):
@@ -36,7 +26,6 @@ def add_new_eval(
     global USERS_TO_SUBMISSION_DATES
     if not REQUESTED_MODELS:
         REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
-        # REQUESTED_MODELS is set(file_names), where file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
 
     user_name = ""
     model_path = model
@@ -50,25 +39,13 @@ def add_new_eval(
     if model_type is None or model_type == "":
         return styled_error("Please select a model type.")
 
-    # Is the user rate limited?
-    if user_name != "":
-        user_can_submit, error_msg = user_submission_permission(
-            user_name, USERS_TO_SUBMISSION_DATES, RATE_LIMIT_PERIOD, RATE_LIMIT_QUOTA
-        )
-        if not user_can_submit:
-            return styled_error(error_msg)
-
-    # Did the model authors forbid its submission to the leaderboard?
-    if model in DO_NOT_SUBMIT_MODELS or base_model in DO_NOT_SUBMIT_MODELS:
-        return styled_warning("Model authors have requested that their model be not submitted on the leaderboard.")
-
     # Does the model actually exist?
     if revision == "":
         revision = "main"
 
     # Is the model on the hub?
     if weight_type in ["Delta", "Adapter"]:
-        base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=
+        base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
         if not base_model_on_hub:
             return styled_error(f'Base model "{base_model}" {error}')
 
@@ -98,54 +75,10 @@ def add_new_eval(
     # Seems good, creating the eval
     print("Adding new eval")
 
-    print()
-    print(f"requested_tasks: {requested_tasks}")
-    print(f"type(requested_tasks): {type(requested_tasks)}")
-    print()
-    # requested_tasks: [{'benchmark': 'hellaswag', 'metric': 'acc_norm', 'col_name': 'HellaSwag'}, {'benchmark': 'pubmedqa', 'metric': 'acc', 'col_name': 'PubMedQA'}]
-    # type(requested_tasks): <class 'list'>
-
-    requested_task_names = [task_dic['benchmark'] for task_dic in requested_tasks]
-
-    print()
-    print(f"requested_task_names: {requested_task_names}")
-    print(f"type(requested_task_names): {type(requested_task_names)}")
-    print()
-
-    already_submitted_tasks = []
-
-    for requested_task_name in requested_task_names:
-
-        if f"{model}_{requested_task_name}_{revision}_{precision}" in REQUESTED_MODELS:
-            # return styled_warning("This model has been already submitted.")
-            already_submitted_tasks.append(requested_task_name)
-
-    task_names_for_eval = set(requested_task_names) - set(already_submitted_tasks)
-    task_names_for_eval = list(task_names_for_eval)
-
-    return_msg = "Your request has been submitted to the evaluation queue! Please wait for up to an hour for the model to show in the PENDING list."
-    if len(already_submitted_tasks) > 0:
-
-        return_msg = f"This model has been already submitted for task(s) {already_submitted_tasks}. Evaluation will proceed for tasks {task_names_for_eval}. Please wait for up to an hour for the model to show in the PENDING list."
-
-    if len(task_names_for_eval)==0:
-        return styled_warning(f"This model has been already submitted for task(s) {already_submitted_tasks}.")
-
-    tasks_for_eval = [dct for dct in requested_tasks if dct['benchmark'] in task_names_for_eval]
-
-    print()
-    print(f"tasks_for_eval: {tasks_for_eval}")
-    # print(f"type(requested_task_names): {type(requested_task_names)}")
-    print()
-
     eval_entry = {
         "model": model,
-
-        "requested_tasks": tasks_for_eval,  # this is a list of tasks. would eval file be written correctly for each tasks? YES. run_evaluation() takes list of tasks. might have to specify
-
         "base_model": base_model,
         "revision": revision,
-        "private": private,
         "precision": precision,
         "weight_type": weight_type,
         "status": "PENDING",
@@ -155,25 +88,20 @@ def add_new_eval(
         "params": model_size,
         "license": license,
     }
-
-
-    ####---- ####---- ####---- ####---- ####---- ####---- ####---- ####---- ####---- ####---- ####---- ####---- ####---- ####----
-
 
+    # Check for duplicate submission
+    if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
+        return styled_warning("This model has been already submitted.")
 
     print("Creating eval file")
-    OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
+    OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
    os.makedirs(OUT_DIR, exist_ok=True)
-    out_path = f"{OUT_DIR}/{model_path}
-
-    print(f"out_path = {out_path}")
+    out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
 
     with open(out_path, "w") as f:
-        f.write(json.dumps(eval_entry))
+        f.write(json.dumps(eval_entry))
 
-    print("Uploading eval file
-    print()
-    print(f"path_or_fileobj={out_path}, path_in_repo={out_path.split('eval-queue/')[1]}, repo_id={QUEUE_REPO}, repo_type=dataset,")
+    print("Uploading eval file")
     API.upload_file(
         path_or_fileobj=out_path,
         path_in_repo=out_path.split("eval-queue/")[1],
@@ -182,10 +110,9 @@ def add_new_eval(
         commit_message=f"Add {model} to eval queue",
     )
 
-    print(f"is os.remove(out_path) the problem?")
     # Remove the local file
     os.remove(out_path)
 
     return styled_message(
-
+        "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
    )
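With the per-task bookkeeping gone, a submission is now identified by the `{model}_{revision}_{precision}` key and written to a single request file. A small sketch of the path and entry the new code produces; all values are placeholders, only the fields visible in this diff are included, and the org/model split shown here is assumed from the collapsed part of the hunk:

```python
import json

# Placeholder submission values; in the collapsed part of the diff the org prefix
# is split off the model id, which is assumed here.
model = "my-org/my-model"
user_name, model_path = "my-org", "my-model"
revision, precision, weight_type = "main", "float16", "Original"

# Duplicate-check key, matching what already_submitted_models() collects.
dedup_key = f"{model}_{revision}_{precision}"

# Request-file path under the eval queue (EVAL_REQUESTS_PATH/user_name in the real code).
out_path = f"eval-queue/{user_name}/{model_path}_eval_request_False_{precision}_{weight_type}.json"

# Partial eval entry: only fields visible in this diff; the real dict carries more.
eval_entry = {
    "model": model,
    "base_model": "",
    "revision": revision,
    "precision": precision,
    "weight_type": weight_type,
    "status": "PENDING",
}
print(dedup_key, out_path, json.dumps(eval_entry, indent=2), sep="\n")
```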
src/utils.py
DELETED
@@ -1,29 +0,0 @@
-import pandas as pd
-from huggingface_hub import snapshot_download
-
-
-def my_snapshot_download(repo_id, revision, local_dir, repo_type, max_workers):
-    for i in range(10):
-        try:
-            snapshot_download(repo_id=repo_id, revision=revision, local_dir=local_dir, repo_type=repo_type, max_workers=max_workers)
-            return
-        except Exception:
-            import time
-            time.sleep(60)
-    return
-
-
-def get_dataset_url(row):
-    dataset_name = row['Benchmark']
-    dataset_url = row['Dataset Link']
-    benchmark = f'<a target="_blank" href="{dataset_url}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{dataset_name}</a>'
-    return benchmark
-
-def get_dataset_summary_table(file_path):
-    df = pd.read_csv(file_path)
-
-    df['Benchmark'] = df.apply(lambda x: get_dataset_url(x), axis=1)
-
-    df = df[['Category', 'Benchmark', 'Data Split', 'Data Size', 'Language']]
-
-    return df
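For reference, the deleted helper retried `snapshot_download` up to ten times with a one-minute pause between attempts. Anything that still needs that behaviour can recreate it with a small wrapper along these lines; the function name and the extra `retries`/`wait_s` parameters are additions for the sketch, not part of the original helper:

```python
import time

from huggingface_hub import snapshot_download


def snapshot_download_with_retries(repo_id, revision, local_dir, repo_type, max_workers, retries=10, wait_s=60):
    """Retry wrapper mirroring the deleted my_snapshot_download helper."""
    for _ in range(retries):
        try:
            snapshot_download(
                repo_id=repo_id, revision=revision, local_dir=local_dir,
                repo_type=repo_type, max_workers=max_workers,
            )
            return
        except Exception:
            time.sleep(wait_s)
```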