MotzWanted committed on
Commit
be62d39
1 Parent(s): cd5a002

feat: fork biomed leaderboard

.DS_Store ADDED
Binary file (6.15 kB).
 
.gitignore ADDED
@@ -0,0 +1,13 @@
1
+ __pycache__/
2
+
3
+ eval-results/
4
+ eval-queue/
5
+ eval-results-bk/
6
+ eval-queue-bk/
7
+
8
+ src/backend/tasks/
9
+ src/backend/probing_tasks/
10
+ hub/
11
+ offload/
12
+
13
+ token
app.py ADDED
@@ -0,0 +1,359 @@
1
+ import gradio as gr
2
+ import pandas as pd
3
+
4
+ from apscheduler.schedulers.background import BackgroundScheduler
5
+
6
+ from src.display.css_html_js import custom_css
7
+
8
+ from src.display.about import (
9
+ CITATION_BUTTON_LABEL,
10
+ CITATION_BUTTON_TEXT,
11
+ EVALUATION_QUEUE_TEXT,
12
+ INTRODUCTION_TEXT,
13
+ LLM_BENCHMARKS_TEXT,
14
+ LLM_BENCHMARKS_DETAILS,
15
+ FAQ_TEXT,
16
+ TITLE,
17
+ )
18
+
19
+ from src.display.utils import (
20
+ BENCHMARK_COLS,
21
+ COLS,
22
+ EVAL_COLS,
23
+ EVAL_TYPES,
24
+ NUMERIC_INTERVALS,
25
+ TYPES,
26
+ AutoEvalColumn,
27
+ ModelType,
28
+ fields,
29
+ WeightType,
30
+ Precision
31
+ )
32
+
33
+ from src.populate import get_evaluation_queue_df, get_leaderboard_df
34
+
35
+ from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, H4_TOKEN, IS_PUBLIC, QUEUE_REPO, REPO_ID, RESULTS_REPO
36
+ from src.submission.submit import add_new_eval
37
+
38
+ from src.display.utils import Tasks
39
+
40
+ from huggingface_hub import snapshot_download
41
+
42
+ ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## -------
43
+
44
+ def restart_space():
45
+ API.restart_space(repo_id=REPO_ID, token=H4_TOKEN)
46
+
47
+ def ui_snapshot_download(repo_id, local_dir, repo_type, tqdm_class, etag_timeout):
48
+ try:
49
+ print(f"local_dir for snapshot download = {local_dir}")
50
+ snapshot_download(repo_id=repo_id, local_dir=local_dir, repo_type=repo_type, tqdm_class=tqdm_class, etag_timeout=etag_timeout)
51
+ except Exception:
52
+ print("ui_snapshot_download failed. restarting space...")
53
+ restart_space()
54
+
55
+ # Searching and filtering
56
+ def update_table(hidden_df: pd.DataFrame, columns: list, type_query: list, precision_query: list, size_query: list, query: str):
57
+ print(f"hidden_df = {hidden_df}")
58
+ show_deleted = True
59
+ filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted)
60
+
61
+ print(f"filtered_df = {filtered_df}")
62
+ filtered_df = filter_queries(query, filtered_df)
63
+ df = select_columns(filtered_df, columns)
64
+ print(f"df = {df}")
65
+ return df
66
+
67
+ def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
68
+ return df[(df[AutoEvalColumn.dummy.name].str.contains(query, case=False))]
69
+
70
+
71
+ def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
72
+ always_here_cols = [
73
+ AutoEvalColumn.model_type_symbol.name,
74
+ AutoEvalColumn.model.name,
75
+ ]
76
+ # We use COLS to maintain sorting
77
+ filtered_df = df[
78
+ always_here_cols + [c for c in COLS if c in df.columns and c in columns] + [AutoEvalColumn.dummy.name]
79
+ ]
80
+ return filtered_df
81
+
82
+ def filter_queries(query: str, filtered_df: pd.DataFrame):
83
+ final_df = []
84
+ if query != "":
85
+ queries = [q.strip() for q in query.split(";")]
86
+ for _q in queries:
87
+ _q = _q.strip()
88
+ if _q != "":
89
+ temp_filtered_df = search_table(filtered_df, _q)
90
+ if len(temp_filtered_df) > 0:
91
+ final_df.append(temp_filtered_df)
92
+ if len(final_df) > 0:
93
+ filtered_df = pd.concat(final_df)
94
+ filtered_df = filtered_df.drop_duplicates(
95
+ subset=[AutoEvalColumn.model.name, AutoEvalColumn.precision.name, AutoEvalColumn.revision.name]
96
+ )
97
+
98
+ return filtered_df
99
+
100
+
101
+ def filter_models(df: pd.DataFrame, type_query: list, size_query: list, precision_query: list, show_deleted: bool) -> pd.DataFrame:
102
+
103
+ print(f"filter_models()'s df: {df}\n")
104
+ # Show all models
105
+ if show_deleted:
106
+ filtered_df = df
107
+ else: # Show only still on the hub models
108
+ filtered_df = df[df[AutoEvalColumn.still_on_hub.name] == True]
109
+
110
+ type_emoji = [t[0] for t in type_query]
111
+ filtered_df = filtered_df.loc[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
112
+ filtered_df = filtered_df.loc[df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])]
113
+
114
+ numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))
115
+ params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce")
116
+ mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
117
+ filtered_df = filtered_df.loc[mask]
118
+
119
+ return filtered_df
120
+
121
+
122
+ ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## ------- ## -------
123
+
124
+ ui_snapshot_download(repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30)
125
+ ui_snapshot_download(repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30)
126
+
127
+ print(f"COLS = {COLS}")
128
+
129
+
130
+ raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS) # NOTE: results are currently only saved in the _bk dirs.
131
+ leaderboard_df = original_df.copy()
132
+ print(f"leaderboard_df = {leaderboard_df}")
133
+
134
+
135
+ ################################################################################################################################
136
+ demo = gr.Blocks(css=custom_css)
137
+ with demo:
138
+ gr.HTML(TITLE)
139
+ gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
140
+
141
+ with gr.Tabs(elem_classes="tab-buttons") as tabs:
142
+
143
+ # toggle break 1: this tab just RENDERS existing result files on remote repo.
144
+ with gr.TabItem("Benchmarks", elem_id="llm-benchmark-tab-table", id=0):
145
+
146
+ with gr.Row():
147
+ with gr.Column():
148
+ with gr.Row():
149
+ search_bar = gr.Textbox(placeholder=" 🔍 Model search (separate multiple queries with `;`)", show_label=False, elem_id="search-bar",)
150
+ with gr.Row():
151
+ shown_columns = gr.CheckboxGroup(
152
+ choices=[
153
+ c.name
154
+ for c in fields(AutoEvalColumn)
155
+ if not c.hidden and not c.never_hidden and not c.dummy
156
+ ],
157
+ value=[
158
+ c.name
159
+ for c in fields(AutoEvalColumn)
160
+ if c.displayed_by_default and not c.hidden and not c.never_hidden
161
+ ],
162
+ label="Select columns to show",
163
+ elem_id="column-select",
164
+ interactive=True,
165
+ )
166
+
167
+ with gr.Column(min_width=320):
168
+ filter_columns_type = gr.CheckboxGroup(
169
+ label="Model types",
170
+ choices=[t.to_str() for t in ModelType],
171
+ value=[t.to_str() for t in ModelType],
172
+ interactive=True,
173
+ elem_id="filter-columns-type",
174
+ )
175
+ filter_columns_precision = gr.CheckboxGroup(
176
+ label="Precision",
177
+ choices=[i.value.name for i in Precision],
178
+ value=[i.value.name for i in Precision],
179
+ interactive=True,
180
+ elem_id="filter-columns-precision",
181
+ )
182
+ filter_columns_size = gr.CheckboxGroup(
183
+ label="Model sizes (in billions of parameters)",
184
+ choices=list(NUMERIC_INTERVALS.keys()),
185
+ value=list(NUMERIC_INTERVALS.keys()),
186
+ interactive=True,
187
+ elem_id="filter-columns-size",
188
+ )
189
+
190
+ # leaderboard_table = gr.components.Dataframe(
191
+ # value=leaderboard_df[
192
+ # [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
193
+ # + shown_columns.value
194
+ # + [AutoEvalColumn.dummy.name]
195
+ # ] if leaderboard_df.empty is False else leaderboard_df,
196
+ # headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value,
197
+ # datatype=TYPES,
198
+ # elem_id="leaderboard-table",
199
+ # interactive=False,
200
+ # visible=True,
201
+ # column_widths=["2%", "20%"]
202
+ # )
203
+ leaderboard_table = gr.components.Dataframe(
204
+ # value=leaderboard_df,
205
+ value=leaderboard_df[
206
+ [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
207
+ + shown_columns.value
208
+ + [AutoEvalColumn.dummy.name]
209
+ ] if leaderboard_df.empty is False else leaderboard_df,
210
+ headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value,
211
+ datatype=TYPES,
212
+ elem_id="leaderboard-table",
213
+ interactive=False,
214
+ visible=True,
215
+ # column_widths=["2%", "20%"]
216
+ )
217
+ # Dummy leaderboard for handling the case when the user uses backspace key
218
+ hidden_leaderboard_table_for_search = gr.components.Dataframe(
219
+ value=original_df[COLS] if original_df.empty is False else original_df,
220
+ headers=COLS,
221
+ datatype=TYPES,
222
+ visible=False
223
+ )
224
+ for selector in [shown_columns, filter_columns_type, filter_columns_precision, filter_columns_size]:
225
+ selector.change(
226
+ update_table,
227
+ [
228
+ hidden_leaderboard_table_for_search,
229
+ shown_columns,
230
+ filter_columns_type,
231
+ filter_columns_precision,
232
+ filter_columns_size,
233
+ search_bar,
234
+ ],
235
+ leaderboard_table,
236
+ queue=True,
237
+ )
238
+
239
+ # toggle break 2: Submission -> runs add_new_eval() (actual evaluation is done on backend when backend-cli.py is run.)
240
+ with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
241
+ # with gr.Column():
242
+ # with gr.Row():
243
+ # gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
244
+
245
+ # with gr.Column():
246
+ # with gr.Accordion(
247
+ # f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
248
+ # open=False,
249
+ # ):
250
+ # with gr.Row():
251
+ # finished_eval_table = gr.components.Dataframe(
252
+ # value=finished_eval_queue_df,
253
+ # headers=EVAL_COLS,
254
+ # datatype=EVAL_TYPES,
255
+ # row_count=5
256
+ # )
257
+ # with gr.Accordion(
258
+ # f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
259
+ # open=False,
260
+ # ):
261
+ # with gr.Row():
262
+ # running_eval_table = gr.components.Dataframe(
263
+ # value=running_eval_queue_df,
264
+ # headers=EVAL_COLS,
265
+ # datatype=EVAL_TYPES,
266
+ # row_count=5
267
+ # )
268
+
269
+ # with gr.Accordion(
270
+ # f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
271
+ # open=False,
272
+ # ):
273
+ # with gr.Row():
274
+ # pending_eval_table = gr.components.Dataframe(
275
+ # value=pending_eval_queue_df,
276
+ # headers=EVAL_COLS,
277
+ # datatype=EVAL_TYPES,
278
+ # row_count=5
279
+ # )
280
+ with gr.Row():
281
+ gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
282
+
283
+ with gr.Row():
284
+ with gr.Column():
285
+ model_name_textbox = gr.Textbox(label="Model name")
286
+ # You can use the revision parameter to point to the specific commit hash when downloading.
287
+ revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
288
+ private = gr.Checkbox(False, label="Private", visible=not IS_PUBLIC)
289
+ model_type = gr.Dropdown(
290
+ choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
291
+ label="Model type",
292
+ multiselect=False,
293
+ value=None,
294
+ interactive=True,
295
+ )
296
+
297
+ with gr.Column():
298
+ precision = gr.Dropdown(
299
+ choices=[i.value.name for i in Precision if i != Precision.Unknown],
300
+ label="Precision",
301
+ multiselect=False,
302
+ value="float32",
303
+ interactive=True,
304
+ )
305
+ weight_type = gr.Dropdown(
306
+ choices=[i.value.name for i in WeightType],
307
+ label="Weights type",
308
+ multiselect=False,
309
+ value="Original",
310
+ interactive=True,
311
+ )
312
+
313
+
314
+ requested_tasks = gr.CheckboxGroup(
315
+ choices=[ (i.value.col_name, i.value) for i in Tasks],
316
+
317
+ label="Select tasks",
318
+ elem_id="task-select",
319
+ interactive=True,
320
+ )
321
+
322
+
323
+ base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
324
+
325
+ submit_button = gr.Button("Submit Eval")
326
+ submission_result = gr.Markdown()
327
+
328
+ # we need to add task specification argument here as well.
329
+ submit_button.click(
330
+ add_new_eval,
331
+ [
332
+ model_name_textbox,
333
+
334
+ requested_tasks, # is this a list of str or class Task? i think it's Task.
335
+
336
+ base_model_name_textbox,
337
+ revision_name_textbox,
338
+ precision,
339
+ private,
340
+ weight_type,
341
+ model_type,
342
+ ],
343
+ submission_result)
344
+
345
+
346
+
347
+ # demo.launch()
348
+
349
+ ####
350
+
351
+ scheduler = BackgroundScheduler()
352
+
353
+ scheduler.add_job(restart_space, "interval", seconds=6 * 60 * 60)
354
+
355
+ scheduler.start()
356
+ # demo.queue(default_concurrency_limit=40).launch()
357
+
358
+ # demo.launch(show_api=False, enable_queue=False)
359
+ demo.launch() # TypeError: Blocks.launch() got an unexpected keyword argument 'enable_queue'
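
Note: the search behaviour wired up above (search_table + filter_queries) accepts several `;`-separated queries and concatenates the matches. A minimal standalone sketch of that semantics, using a plain pandas frame with a made-up "model" column standing in for AutoEvalColumn.dummy:

import pandas as pd

df = pd.DataFrame({"model": ["org-a/bio-llm-7b", "org-b/med-llm-7b", "org-a/general-7b"]})

def search(frame: pd.DataFrame, query: str) -> pd.DataFrame:
    # case-insensitive substring match, mirroring search_table()
    return frame[frame["model"].str.contains(query, case=False)]

parts = [q.strip() for q in "bio; med".split(";") if q.strip()]
hits = pd.concat([search(df, q) for q in parts]).drop_duplicates()
print(hits["model"].tolist())  # ['org-a/bio-llm-7b', 'org-b/med-llm-7b']
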
app_empty.py ADDED
@@ -0,0 +1,7 @@
1
+ import gradio as gr
2
+
3
+ def greet(name):
4
+ return "Hello " + name + "!!"
5
+
6
+ # iface = gr.Interface(fn=greet, inputs="text", outputs="text")
7
+ # iface.launch()
backend-cli.py ADDED
@@ -0,0 +1,187 @@
1
+ #!/usr/bin/env python
2
+
3
+ import os
4
+ import json
5
+
6
+ import random
7
+ from datetime import datetime
8
+
9
+ from src.backend.run_eval_suite import run_evaluation
10
+ from src.backend.manage_requests import check_completed_evals, get_eval_requests, set_eval_request
11
+ from src.backend.sort_queue import sort_models_by_priority
12
+
13
+
14
+ from src.backend.envs import EVAL_REQUESTS_PATH_BACKEND, EVAL_RESULTS_PATH_BACKEND, DEVICE, LIMIT, Tasks, Task, num_fewshots
15
+
16
+ from src.backend.manage_requests import EvalRequest
17
+ from src.leaderboard.read_evals import EvalResult
18
+
19
+ from src.envs import QUEUE_REPO, RESULTS_REPO, API
20
+ from src.utils import my_snapshot_download
21
+
22
+ import time
23
+
24
+ import logging
25
+ import pprint
26
+ import argparse
27
+
28
+
29
+ # def get_subdirectories(path):
30
+ # subdirectories = []
31
+ # # Get all entries in the directory
32
+ # entries = os.listdir(path)
33
+ # for entry in entries:
34
+ # # Check if the entry is a directory
35
+ # if os.path.isdir(os.path.join(path, entry)):
36
+ # subdirectories.append(entry)
37
+ # return subdirectories
38
+
39
+ # parser = argparse.ArgumentParser(description="Get subdirectory names")
40
+ # parser.add_argument("include_path", help="Path to the directory", nargs='?', default=None)
41
+ # args = parser.parse_args()
42
+
43
+ # # = get_subdirectories(args.include_path)
44
+
45
+
46
+
47
+
48
+ def my_set_eval_request(api, eval_request, set_to_status, hf_repo, local_dir):
49
+ for i in range(10):
50
+ try:
51
+ set_eval_request(api=api, eval_request=eval_request, set_to_status=set_to_status, hf_repo=hf_repo, local_dir=local_dir)
52
+ return
53
+ except Exception:
54
+ time.sleep(60)
55
+ return
56
+
57
+
58
+ logging.getLogger("openai").setLevel(logging.WARNING)
59
+
60
+ logging.basicConfig(level=logging.ERROR)
61
+ pp = pprint.PrettyPrinter(width=80)
62
+
63
+ PENDING_STATUS = "PENDING"
64
+ RUNNING_STATUS = "RUNNING"
65
+ FINISHED_STATUS = "FINISHED"
66
+ FAILED_STATUS = "FAILED"
67
+
68
+ TASKS_HARNESS = [task.value for task in Tasks]
69
+
70
+ # starts by downloading results and requests. makes sense since we want to be able to use different backend servers!
71
+ my_snapshot_download(repo_id=RESULTS_REPO, revision="main", local_dir=EVAL_RESULTS_PATH_BACKEND, repo_type="dataset", max_workers=60)
72
+ my_snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60)
73
+
74
+
75
+ def sanity_checks():
76
+ print(f'Device: {DEVICE}')
77
+
78
+ # pull the eval dataset from the hub and parse any eval requests
79
+ # check completed evals and set them to finished
80
+ my_snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60)
81
+ check_completed_evals(api=API, checked_status=RUNNING_STATUS, completed_status=FINISHED_STATUS,
82
+ failed_status=FAILED_STATUS, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND,
83
+ hf_repo_results=RESULTS_REPO, local_dir_results=EVAL_RESULTS_PATH_BACKEND)
84
+ return
85
+
86
+
87
+ def request_to_result_name(request: EvalRequest) -> str:
88
+
89
+ org_and_model = request.model.split("/", 1)
90
+ if len(org_and_model) == 1:
91
+ model = org_and_model[0]
92
+ res = f"{model}_{request.precision}"
93
+ else:
94
+ org = org_and_model[0]
95
+ model = org_and_model[1]
96
+ res = f"{org}_{model}_{request.precision}"
97
+ return res
98
+
99
+ # doesn't make distinctions for tasks since the original code runs eval on ALL tasks.
100
+ def process_evaluation(task_name: str, eval_request: EvalRequest) -> dict:
101
+ # batch_size = 1
102
+ batch_size = "auto"
103
+
104
+ # might not have to get the benchmark.
105
+ print(f"task_name parameter in process_evaluation() = {task_name}") #, task_names=[task.benchmark] = {[task.benchmark]}")
106
+
107
+ num_fewshot = num_fewshots[task_name]
108
+
109
+ results = run_evaluation(eval_request=eval_request, task_names=task_name, num_fewshot=num_fewshot,
110
+ batch_size=batch_size, device=DEVICE, use_cache=None, limit=LIMIT)
111
+
112
+ print('RESULTS', results)
113
+
114
+ dumped = json.dumps(results, indent=2, default=lambda o: '<not serializable>')
115
+ print(dumped)
116
+
117
+ output_path = os.path.join(EVAL_RESULTS_PATH_BACKEND, *eval_request.model.split("/"), f"results_{task_name}_{datetime.now()}.json")
118
+ os.makedirs(os.path.dirname(output_path), exist_ok=True)
119
+ with open(output_path, "w") as f:
120
+ f.write(dumped)
121
+
122
+ my_snapshot_download(repo_id=RESULTS_REPO, revision="main", local_dir=EVAL_RESULTS_PATH_BACKEND, repo_type="dataset", max_workers=60)
123
+ API.upload_file(path_or_fileobj=output_path, path_in_repo=f"{eval_request.model}/results_{task_name}_{datetime.now()}.json",
124
+ repo_id=RESULTS_REPO, repo_type="dataset")
125
+ return results
126
+
127
+
128
+ # the rendering is done with files in local repo.
129
+ def process_pending_requests() -> bool:
130
+ sanity_checks()
131
+
132
+ current_pending_status = [PENDING_STATUS]
133
+
134
+ # Get all eval request that are PENDING, if you want to run other evals, change this parameter
135
+ # GETTING REQUESTS FROM THE HUB NOT LOCAL DIR.
136
+ eval_requests = get_eval_requests(job_status=current_pending_status, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND)
137
+ # Sort the evals by priority (first submitted first run)
138
+ eval_requests = sort_models_by_priority(api=API, models=eval_requests)
139
+
140
+ random.shuffle(eval_requests)
141
+
142
+ # this says zero
143
+ print(f"Found {len(eval_requests)} {','.join(current_pending_status)} eval requests")
144
+
145
+ if len(eval_requests) == 0:
146
+ return False
147
+
148
+ eval_request = eval_requests[0]
149
+ pp.pprint(eval_request)
150
+
151
+ my_snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60)
152
+ my_set_eval_request(api=API, eval_request=eval_request, set_to_status=RUNNING_STATUS, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND)
153
+
154
+ # task_lst = TASKS_HARNESS.copy()
155
+ task_lst = eval_request.get_user_requested_task_names()
156
+ random.shuffle(task_lst)
157
+ print(f"task_lst in process_pending_requests(): {task_lst}")
158
+
159
+ for task_name in task_lst:
160
+
161
+ results = process_evaluation(task_name, eval_request)
162
+
163
+ my_snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60)
164
+ my_set_eval_request(api=API, eval_request=eval_request, set_to_status=FINISHED_STATUS, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND)
165
+
166
+ return True
167
+
168
+
169
+ if __name__ == "__main__":
170
+ # wait = True
171
+
172
+ # import socket
173
+ # if socket.gethostname() in {'hamburg'} or os.path.isdir("/home/pminervi"):
174
+ # wait = False
175
+
176
+ # if wait:
177
+ # time.sleep(60 * random.randint(2, 5))
178
+ # pass
179
+
180
+ # res = False
181
+ res = process_pending_requests()
182
+
183
+ # if res is False:
184
+ # res = process_finished_requests(100)
185
+
186
+ # if res is False:
187
+ # res = process_finished_requests(0)
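
Note: a quick sketch of the result naming produced by request_to_result_name() above; EvalRequest is reduced here to the two fields the function actually reads, and the model id is illustrative:

from dataclasses import dataclass

@dataclass
class _Req:  # stand-in for EvalRequest with only the fields used below
    model: str
    precision: str

def request_to_result_name(request) -> str:
    org_and_model = request.model.split("/", 1)
    if len(org_and_model) == 1:
        return f"{org_and_model[0]}_{request.precision}"
    return f"{org_and_model[0]}_{org_and_model[1]}_{request.precision}"

print(request_to_result_name(_Req("EleutherAI/pythia-70m", "float32")))
# EleutherAI_pythia-70m_float32
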
manage_repos.ipynb ADDED
@@ -0,0 +1,226 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "https://huggingface.co/datasets/chaeeunlee/test_requests\n",
8
+ "\n",
9
+ "https://huggingface.co/datasets/chaeeunlee/test_results"
10
+ ]
11
+ },
12
+ {
13
+ "cell_type": "code",
14
+ "execution_count": 1,
15
+ "metadata": {},
16
+ "outputs": [
17
+ {
18
+ "name": "stdout",
19
+ "output_type": "stream",
20
+ "text": [
21
+ "CACHE_PATH = /Users/chaeeunlee/Documents/VSC_workspaces/huggingface_home_cache\n"
22
+ ]
23
+ },
24
+ {
25
+ "name": "stderr",
26
+ "output_type": "stream",
27
+ "text": [
28
+ "/Users/chaeeunlee/anaconda3/envs/lb/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
29
+ " from .autonotebook import tqdm as notebook_tqdm\n"
30
+ ]
31
+ },
32
+ {
33
+ "data": {
34
+ "text/plain": [
35
+ "'\\n( path_in_repo: str\\nrepo_id: str\\ntoken: typing.Optional[str] = None\\nrepo_type: typing.Optional[str] = Nonerevision: typing.Optional[str] = Nonecommit_message: typing.Optional[str] = Nonecommit_description: typing.Optional[str] = Nonecreate_pr: typing.Optional[bool] = Noneparent_commit: typing.Optional[str] = None )\\n'"
36
+ ]
37
+ },
38
+ "execution_count": 1,
39
+ "metadata": {},
40
+ "output_type": "execute_result"
41
+ }
42
+ ],
43
+ "source": [
44
+ "from src.envs import H4_TOKEN, API, QUEUE_REPO, RESULTS_REPO, REPO_ID\n",
45
+ "\n",
46
+ "from huggingface_hub import HfApi\n",
47
+ "\n",
48
+ "'''\n",
49
+ "( path_in_repo: str\n",
50
+ "repo_id: str\n",
51
+ "token: typing.Optional[str] = None\n",
52
+ "repo_type: typing.Optional[str] = Nonerevision: typing.Optional[str] = Nonecommit_message: typing.Optional[str] = Nonecommit_description: typing.Optional[str] = Nonecreate_pr: typing.Optional[bool] = Noneparent_commit: typing.Optional[str] = None )\n",
53
+ "'''\n",
54
+ "\n"
55
+ ]
56
+ },
57
+ {
58
+ "cell_type": "code",
59
+ "execution_count": 2,
60
+ "metadata": {},
61
+ "outputs": [],
62
+ "source": [
63
+ "res = API.delete_folder(path_in_repo='hub/', repo_id=REPO_ID, repo_type='space')"
64
+ ]
65
+ },
66
+ {
67
+ "cell_type": "code",
68
+ "execution_count": 14,
69
+ "metadata": {},
70
+ "outputs": [
71
+ {
72
+ "ename": "EntryNotFoundError",
73
+ "evalue": "404 Client Error. (Request ID: Root=1-65c43c73-7771219478c3ca215705378d;6308513c-7fb2-4810-afa4-9ea734f21820)\n\nEntry Not Found for url: https://huggingface.co/api/datasets/chaeeunlee/test_results/commit/main.",
74
+ "output_type": "error",
75
+ "traceback": [
76
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
77
+ "\u001b[0;31mHTTPError\u001b[0m Traceback (most recent call last)",
78
+ "File \u001b[0;32m~/anaconda3/envs/lb/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py:286\u001b[0m, in \u001b[0;36mhf_raise_for_status\u001b[0;34m(response, endpoint_name)\u001b[0m\n\u001b[1;32m 285\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 286\u001b[0m \u001b[43mresponse\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mraise_for_status\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 287\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m HTTPError \u001b[38;5;28;01mas\u001b[39;00m e:\n",
79
+ "File \u001b[0;32m~/anaconda3/envs/lb/lib/python3.10/site-packages/requests/models.py:1021\u001b[0m, in \u001b[0;36mResponse.raise_for_status\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1020\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m http_error_msg:\n\u001b[0;32m-> 1021\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m HTTPError(http_error_msg, response\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m)\n",
80
+ "\u001b[0;31mHTTPError\u001b[0m: 404 Client Error: Not Found for url: https://huggingface.co/api/datasets/chaeeunlee/test_results/commit/main",
81
+ "\nThe above exception was the direct cause of the following exception:\n",
82
+ "\u001b[0;31mEntryNotFoundError\u001b[0m Traceback (most recent call last)",
83
+ "Cell \u001b[0;32mIn[14], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m res \u001b[38;5;241m=\u001b[39m \u001b[43mAPI\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdelete_folder\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpath_in_repo\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mEleutherAI/\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrepo_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mRESULTS_REPO\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrepo_type\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mdataset\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m)\u001b[49m\n",
84
+ "File \u001b[0;32m~/anaconda3/envs/lb/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py:118\u001b[0m, in \u001b[0;36mvalidate_hf_hub_args.<locals>._inner_fn\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m check_use_auth_token:\n\u001b[1;32m 116\u001b[0m kwargs \u001b[38;5;241m=\u001b[39m smoothly_deprecate_use_auth_token(fn_name\u001b[38;5;241m=\u001b[39mfn\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m, has_token\u001b[38;5;241m=\u001b[39mhas_token, kwargs\u001b[38;5;241m=\u001b[39mkwargs)\n\u001b[0;32m--> 118\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfn\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
85
+ "File \u001b[0;32m~/anaconda3/envs/lb/lib/python3.10/site-packages/huggingface_hub/hf_api.py:4767\u001b[0m, in \u001b[0;36mHfApi.delete_folder\u001b[0;34m(self, path_in_repo, repo_id, token, repo_type, revision, commit_message, commit_description, create_pr, parent_commit)\u001b[0m\n\u001b[1;32m 4716\u001b[0m \u001b[38;5;129m@validate_hf_hub_args\u001b[39m\n\u001b[1;32m 4717\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mdelete_folder\u001b[39m(\n\u001b[1;32m 4718\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 4728\u001b[0m parent_commit: Optional[\u001b[38;5;28mstr\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 4729\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m CommitInfo:\n\u001b[1;32m 4730\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 4731\u001b[0m \u001b[38;5;124;03m Deletes a folder in the given repo.\u001b[39;00m\n\u001b[1;32m 4732\u001b[0m \n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 4765\u001b[0m \u001b[38;5;124;03m especially useful if the repo is updated / committed to concurrently.\u001b[39;00m\n\u001b[1;32m 4766\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m-> 4767\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcreate_commit\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 4768\u001b[0m \u001b[43m \u001b[49m\u001b[43mrepo_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrepo_id\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4769\u001b[0m \u001b[43m \u001b[49m\u001b[43mrepo_type\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrepo_type\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4770\u001b[0m \u001b[43m \u001b[49m\u001b[43mtoken\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtoken\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4771\u001b[0m \u001b[43m \u001b[49m\u001b[43moperations\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\u001b[43mCommitOperationDelete\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpath_in_repo\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpath_in_repo\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mis_folder\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4772\u001b[0m \u001b[43m \u001b[49m\u001b[43mrevision\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrevision\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4773\u001b[0m \u001b[43m \u001b[49m\u001b[43mcommit_message\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m 4774\u001b[0m \u001b[43m \u001b[49m\u001b[43mcommit_message\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mcommit_message\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mis\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mnot\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01melse\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;124;43mf\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mDelete folder \u001b[39;49m\u001b[38;5;132;43;01m{\u001b[39;49;00m\u001b[43mpath_in_repo\u001b[49m\u001b[38;5;132;43;01m}\u001b[39;49;00m\u001b[38;5;124;43m with huggingface_hub\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\n\u001b[1;32m 4775\u001b[0m \u001b[43m 
\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4776\u001b[0m \u001b[43m \u001b[49m\u001b[43mcommit_description\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcommit_description\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4777\u001b[0m \u001b[43m \u001b[49m\u001b[43mcreate_pr\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcreate_pr\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4778\u001b[0m \u001b[43m \u001b[49m\u001b[43mparent_commit\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mparent_commit\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4779\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
86
+ "File \u001b[0;32m~/anaconda3/envs/lb/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py:118\u001b[0m, in \u001b[0;36mvalidate_hf_hub_args.<locals>._inner_fn\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m check_use_auth_token:\n\u001b[1;32m 116\u001b[0m kwargs \u001b[38;5;241m=\u001b[39m smoothly_deprecate_use_auth_token(fn_name\u001b[38;5;241m=\u001b[39mfn\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m, has_token\u001b[38;5;241m=\u001b[39mhas_token, kwargs\u001b[38;5;241m=\u001b[39mkwargs)\n\u001b[0;32m--> 118\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfn\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
87
+ "File \u001b[0;32m~/anaconda3/envs/lb/lib/python3.10/site-packages/huggingface_hub/hf_api.py:1208\u001b[0m, in \u001b[0;36mfuture_compatible.<locals>._inner\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1205\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mrun_as_future(fn, \u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 1207\u001b[0m \u001b[38;5;66;03m# Otherwise, call the function normally\u001b[39;00m\n\u001b[0;32m-> 1208\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfn\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
88
+ "File \u001b[0;32m~/anaconda3/envs/lb/lib/python3.10/site-packages/huggingface_hub/hf_api.py:3600\u001b[0m, in \u001b[0;36mHfApi.create_commit\u001b[0;34m(self, repo_id, operations, commit_message, commit_description, token, repo_type, revision, create_pr, num_threads, parent_commit, run_as_future)\u001b[0m\n\u001b[1;32m 3598\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 3599\u001b[0m commit_resp \u001b[38;5;241m=\u001b[39m get_session()\u001b[38;5;241m.\u001b[39mpost(url\u001b[38;5;241m=\u001b[39mcommit_url, headers\u001b[38;5;241m=\u001b[39mheaders, data\u001b[38;5;241m=\u001b[39mdata, params\u001b[38;5;241m=\u001b[39mparams)\n\u001b[0;32m-> 3600\u001b[0m \u001b[43mhf_raise_for_status\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcommit_resp\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mendpoint_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcommit\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3601\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m RepositoryNotFoundError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 3602\u001b[0m e\u001b[38;5;241m.\u001b[39mappend_to_message(_CREATE_COMMIT_NO_REPO_ERROR_MESSAGE)\n",
89
+ "File \u001b[0;32m~/anaconda3/envs/lb/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py:296\u001b[0m, in \u001b[0;36mhf_raise_for_status\u001b[0;34m(response, endpoint_name)\u001b[0m\n\u001b[1;32m 294\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m error_code \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mEntryNotFound\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[1;32m 295\u001b[0m message \u001b[38;5;241m=\u001b[39m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mresponse\u001b[38;5;241m.\u001b[39mstatus_code\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m Client Error.\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m+\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m+\u001b[39m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mEntry Not Found for url: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mresponse\u001b[38;5;241m.\u001b[39murl\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m--> 296\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m EntryNotFoundError(message, response) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01me\u001b[39;00m\n\u001b[1;32m 298\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m error_code \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mGatedRepo\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[1;32m 299\u001b[0m message \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 300\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mresponse\u001b[38;5;241m.\u001b[39mstatus_code\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m Client Error.\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m+\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m+\u001b[39m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCannot access gated repo for url \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mresponse\u001b[38;5;241m.\u001b[39murl\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 301\u001b[0m )\n",
90
+ "\u001b[0;31mEntryNotFoundError\u001b[0m: 404 Client Error. (Request ID: Root=1-65c43c73-7771219478c3ca215705378d;6308513c-7fb2-4810-afa4-9ea734f21820)\n\nEntry Not Found for url: https://huggingface.co/api/datasets/chaeeunlee/test_results/commit/main."
91
+ ]
92
+ }
93
+ ],
94
+ "source": [
95
+ "res = API.delete_folder(path_in_repo='EleutherAI/', repo_id=RESULTS_REPO, repo_type='dataset')"
96
+ ]
97
+ },
98
+ {
99
+ "cell_type": "code",
100
+ "execution_count": 15,
101
+ "metadata": {},
102
+ "outputs": [],
103
+ "source": [
104
+ "res = API.delete_folder(path_in_repo='EleutherAI/pythia-70m_biolama_umls_eval_request_False_float32_Original.json', repo_id=QUEUE_REPO, repo_type='dataset')\n",
105
+ "# res = API.delete_folder(path_in_repo='mistralai/', repo_id=QUEUE_REPO, repo_type='dataset')\n",
106
+ "\n",
107
+ "# res = API.delete_file(path_in_repo=\"EleutherAI/pythia-70m_pubmedqa_eval_request_False_float32_Original.json\", repo_id=QUEUE_REPO, repo_type='dataset')\n"
108
+ ]
109
+ },
110
+ {
111
+ "cell_type": "code",
112
+ "execution_count": null,
113
+ "metadata": {},
114
+ "outputs": [],
115
+ "source": [
116
+ "# import os\n",
117
+ "\n",
118
+ "# for root, _, files in os.walk(results_path):\n",
119
+ "# # We should only have json files in model results\n",
120
+ "# if len(files) == 0 or any([not f.endswith(\".json\") for f in files]):\n",
121
+ "# continue\n",
122
+ "\n",
123
+ "# # Sort the files by date\n",
124
+ "# try:\n",
125
+ "# files.sort(key=lambda x: x.removesuffix(\".json\").removeprefix(\"results_\")[:-7])\n",
126
+ "# except dateutil.parser._parser.ParserError:\n",
127
+ "# files = [files[-1]]\n",
128
+ "\n",
129
+ "\n",
130
+ "# print(f\"files = {files}\")\n",
131
+ "\n",
132
+ "# for file in files:\n",
133
+ "# model_result_filepaths.append(os.path.join(root, file))"
134
+ ]
135
+ },
136
+ {
137
+ "cell_type": "code",
138
+ "execution_count": null,
139
+ "metadata": {},
140
+ "outputs": [
141
+ {
142
+ "name": "stdout",
143
+ "output_type": "stream",
144
+ "text": [
145
+ "DatasetInfo(id='chaeeunlee/test_requests', author='chaeeunlee', sha='c7f4d0c0b1207cc773dcd0b1df49cd6a883e02be', created_at=datetime.datetime(2024, 1, 31, 11, 19, 22, tzinfo=datetime.timezone.utc), last_modified=datetime.datetime(2024, 1, 31, 19, 55, 30, tzinfo=datetime.timezone.utc), private=False, gated=False, disabled=False, downloads=0, likes=0, paperswithcode_id=None, tags=['license:mit', 'region:us'], card_data={'annotations_creators': None, 'language_creators': None, 'language': None, 'license': 'mit', 'multilinguality': None, 'size_categories': None, 'source_datasets': None, 'task_categories': None, 'task_ids': None, 'paperswithcode_id': None, 'pretty_name': None, 'config_names': None, 'train_eval_index': None}, siblings=[RepoSibling(rfilename='.gitattributes', size=None, blob_id=None, lfs=None), RepoSibling(rfilename='EleutherAI/pythia-160m_eval_request_False_float32_Original.json', size=None, blob_id=None, lfs=None), RepoSibling(rfilename='README.md', size=None, blob_id=None, lfs=None)])\n"
146
+ ]
147
+ }
148
+ ],
149
+ "source": [
150
+ "info = API.dataset_info(repo_id=QUEUE_REPO)\n",
151
+ "print(info)"
152
+ ]
153
+ },
154
+ {
155
+ "cell_type": "code",
156
+ "execution_count": 21,
157
+ "metadata": {},
158
+ "outputs": [],
159
+ "source": [
160
+ "from huggingface_hub import HfApi\n",
161
+ "\n",
162
+ "def print_repo_directory_structure(api, repo_id, is_dataset=True):\n",
163
+ " \"\"\"\n",
164
+ " Print the directory structure of a Hugging Face repository.\n",
165
+ "\n",
166
+ " Parameters:\n",
167
+ " - repo_id (str): Repository ID in the format \"username/reponame\".\n",
168
+ " \"\"\"\n",
169
+ " # api = HfApi()\n",
170
+ " if is_dataset:\n",
171
+ " repo_files = api.list_repo_files(repo_id=repo_id, repo_type='dataset')\n",
172
+ " else:\n",
173
+ " repo_files = api.list_repo_files(repo_id=repo_id)\n",
174
+ "\n",
175
+ "\n",
176
+ " print(f\"Directory structure of {repo_id}:\")\n",
177
+ " print()\n",
178
+ " for file_path in repo_files:\n",
179
+ " print(file_path)\n",
180
+ "\n"
181
+ ]
182
+ },
183
+ {
184
+ "cell_type": "code",
185
+ "execution_count": 35,
186
+ "metadata": {},
187
+ "outputs": [
188
+ {
189
+ "name": "stdout",
190
+ "output_type": "stream",
191
+ "text": [
192
+ "Directory structure of chaeeunlee/test_requests:\n",
193
+ "\n",
194
+ ".gitattributes\n",
195
+ "README.md\n"
196
+ ]
197
+ }
198
+ ],
199
+ "source": [
200
+ "repo_id = QUEUE_REPO # Replace with the target repository ID\n",
201
+ "print_repo_directory_structure(API, repo_id)"
202
+ ]
203
+ }
204
+ ],
205
+ "metadata": {
206
+ "kernelspec": {
207
+ "display_name": "lb",
208
+ "language": "python",
209
+ "name": "python3"
210
+ },
211
+ "language_info": {
212
+ "codemirror_mode": {
213
+ "name": "ipython",
214
+ "version": 3
215
+ },
216
+ "file_extension": ".py",
217
+ "mimetype": "text/x-python",
218
+ "name": "python",
219
+ "nbconvert_exporter": "python",
220
+ "pygments_lexer": "ipython3",
221
+ "version": "3.10.13"
222
+ }
223
+ },
224
+ "nbformat": 4,
225
+ "nbformat_minor": 2
226
+ }
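
Note: the notebook above only exercises delete/list housekeeping on the request and result repos. A hedged sketch of the complementary upload path with the same HfApi client (the request payload and file name are illustrative, not taken from this commit):

import json, tempfile
from src.envs import API, QUEUE_REPO

request = {"model": "EleutherAI/pythia-70m", "status": "PENDING", "precision": "float32"}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(request, f)
    tmp_path = f.name

API.upload_file(path_or_fileobj=tmp_path,
                path_in_repo="EleutherAI/pythia-70m_eval_request_False_float32_Original.json",
                repo_id=QUEUE_REPO, repo_type="dataset")
print(API.list_repo_files(repo_id=QUEUE_REPO, repo_type="dataset"))
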
requirements.txt ADDED
@@ -0,0 +1,31 @@
1
+ torch
2
+ colorama
3
+ APScheduler
4
+ black
5
+ click
6
+ datasets
7
+ gradio
8
+ gradio_client
9
+ huggingface-hub
10
+ matplotlib
11
+ numpy
12
+ pandas
13
+ plotly
14
+ python-dateutil
15
+ requests
16
+ semantic-version
17
+ tqdm
18
+ transformers>=4.36.0,<4.37.0
19
+ tokenizers>=0.15.0
20
+ lm_eval @ git+https://github.com/EleutherAI/lm-evaluation-harness.git
21
+ accelerate
22
+ sentencepiece
23
+ langdetect
24
+ sacrebleu
25
+ cchardet
26
+ rouge_score
27
+ bert-score
28
+ evaluate
29
+ spacy
30
+ selfcheckgpt
31
+ immutabledict
src/.DS_Store ADDED
Binary file (6.15 kB).
 
src/backend/.DS_Store ADDED
Binary file (6.15 kB).
 
src/backend/envs.py ADDED
@@ -0,0 +1,50 @@
1
+ import os
2
+
3
+ import torch
4
+
5
+ from dataclasses import dataclass
6
+ from enum import Enum
7
+
8
+ from src.envs import CACHE_PATH
9
+
10
+
11
+ @dataclass
12
+ class Task:
13
+ benchmark: str
14
+ # metric: str # yeah i don't think we need this.
15
+ col_name: str
16
+ num_fewshot: int
17
+
18
+
19
+ # how are these differentiated with Tasks in display/utils.py ?
20
+ class Tasks(Enum):
21
+ # task0 = Task("pubmedqa", "acc", "PubMedQA", 0) # 64, as in the ATLAS paper
22
+ # task1 = Task("hellaswag", "acc_norm", "HellaSwag", 0) # 64, as in the ATLAS paper
23
+ # task0 = Task("medqa", "acc_norm", "MedQA", 0) # medqa_4options?
24
+ # task0 = Task("medmcqa", "acc_norm", "MedMCQA", 0)
25
+ # task1 = Task("pubmedqa", "acc", "PubMedQA", 0)
26
+
27
+ task0 = Task("medmcqa", "MedMCQA", 0)
28
+ task1 = Task("pubmedqa", "PubMedQA", 0)
29
+ task2 = Task("pubmedqa_no_context", "PubMedQA_no_context", 0)
30
+ task3 = Task("biolama_umls", "BioLAMA-UMLS", 0)
31
+
32
+
33
+
34
+ num_fewshots = {
35
+ "medqa": 0,
36
+ "medmcqa": 0,
37
+ "pubmedqa": 0,
38
+ "pubmedqa_no_context":0,
39
+ "biolama_umls":0,
40
+ }
41
+
42
+
43
+ # NUM_FEWSHOT = 64 # Change with your few shot
44
+
45
+ EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
46
+ EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
47
+
48
+ DEVICE = "cuda" if torch.cuda.is_available() else "mps"
49
+
50
+ LIMIT = None # Testing; needs to be None
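
Note: a small sanity-check sketch (assuming this module's definitions) of how the backend resolves the few-shot count for each task declared above:

from src.backend.envs import Tasks, num_fewshots

for t in Tasks:
    print(f"{t.value.col_name} ({t.value.benchmark}): {num_fewshots[t.value.benchmark]}-shot")
# MedMCQA (medmcqa): 0-shot
# PubMedQA (pubmedqa): 0-shot
# PubMedQA_no_context (pubmedqa_no_context): 0-shot
# BioLAMA-UMLS (biolama_umls): 0-shot
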
src/backend/manage_requests.py ADDED
@@ -0,0 +1,140 @@
1
+ import glob
2
+ import json
3
+ from dataclasses import dataclass
4
+ from typing import Optional, List
5
+
6
+ from huggingface_hub import HfApi, snapshot_download
7
+
8
+ from src.utils import my_snapshot_download
9
+
10
+ from lm_eval import tasks, evaluator, utils
11
+
12
+ from src.display.utils import Task
13
+
14
+
15
+ @dataclass
16
+ class EvalRequest:
17
+ model: str
18
+
19
+ ## added
20
+ requested_tasks: List[Task] # dict?
21
+
22
+
23
+ private: bool
24
+ status: str
25
+ json_filepath: str
26
+ weight_type: str = "Original"
27
+ model_type: str = "" # pretrained, finetuned, with RL
28
+ precision: str = "" # float16, bfloat16
29
+ base_model: Optional[str] = None # for adapter models
30
+ revision: str = "main" # commit
31
+ submitted_time: Optional[str] = "2022-05-18T11:40:22.519222" # random date just so that we can still order requests by date
32
+ model_type: Optional[str] = None
33
+ likes: Optional[int] = 0
34
+ params: Optional[int] = None
35
+ license: Optional[str] = ""
36
+
37
+ ## added by chaeeun
38
+ def get_user_requested_task_names(self) -> List[str]:
39
+ user_requested_tasks = self.requested_tasks
40
+ # print(f" {user_requested_tasks}")
41
+
42
+ task_names = [task['benchmark'] for task in user_requested_tasks]
43
+
44
+ return task_names
45
+
46
+
47
+ def get_model_args(self) -> str:
48
+
49
+ ## added
50
+ if "gpt" in self.model:
51
+ model_args = f"model={self.model},revision={self.revision},parallelize=True"
52
+ else:
53
+ model_args = f"pretrained={self.model},revision={self.revision},parallelize=True"
54
+
55
+ if self.precision in ["float16", "float32", "bfloat16"]:
56
+ model_args += f",dtype={self.precision}"
57
+ # Quantized models need some added config, the install of bits and bytes, etc
58
+ #elif self.precision == "8bit":
59
+ # model_args += ",load_in_8bit=True"
60
+ #elif self.precision == "4bit":
61
+ # model_args += ",load_in_4bit=True"
62
+ #elif self.precision == "GPTQ":
63
+ # A GPTQ model does not need dtype to be specified,
64
+ # it will be inferred from the config
65
+ pass
66
+ else:
67
+ raise Exception(f"Unknown precision {self.precision}.")
68
+
69
+ return model_args
70
+
71
+ # sets the new status on the remote repo as well!
72
+ def set_eval_request(api: HfApi, eval_request: EvalRequest, set_to_status: str, hf_repo: str, local_dir: str):
73
+ """Updates a given eval request with its new status on the hub (running, completed, failed, ...)"""
74
+ json_filepath = eval_request.json_filepath
75
+
76
+ with open(json_filepath) as fp:
77
+ data = json.load(fp)
78
+
79
+ data["status"] = set_to_status
80
+
81
+ with open(json_filepath, "w") as f:
82
+ f.write(json.dumps(data))
83
+
84
+ api.upload_file(path_or_fileobj=json_filepath, path_in_repo=json_filepath.replace(local_dir, ""),
85
+ repo_id=hf_repo, repo_type="dataset")
86
+
87
+ # getting status from the remote repo as well.
88
+ def get_eval_requests(job_status: list, local_dir: str, hf_repo: str) -> list[EvalRequest]:
89
+ """Get all pending evaluation requests and return a list in which private
90
+ models appearing first, followed by public models sorted by the number of
91
+ likes.
92
+
93
+ Returns:
94
+ `list[EvalRequest]`: a list of model info dicts.
95
+ """
96
+ my_snapshot_download(repo_id=hf_repo, revision="main", local_dir=local_dir, repo_type="dataset", max_workers=60)
97
+ json_files = glob.glob(f"{local_dir}/**/*.json", recursive=True)
98
+
99
+ eval_requests = []
100
+ for json_filepath in json_files:
101
+ with open(json_filepath) as fp:
102
+ data = json.load(fp)
103
+ if data["status"] in job_status:
104
+ # import pdb
105
+ # breakpoint()
106
+ data["json_filepath"] = json_filepath
107
+
108
+ if 'job_id' in data:
109
+ del data['job_id']
110
+
111
+ print(f"data in get_eval_requests(): {data}")
112
+
113
+ eval_request = EvalRequest(**data)
114
+ eval_requests.append(eval_request)
115
+
116
+ print(f"eval_requests right before returning: {eval_requests}")
117
+ return eval_requests
118
+
119
+ # not entirely sure what this one does.
120
+ def check_completed_evals(api: HfApi, hf_repo: str, local_dir: str, checked_status: str, completed_status: str,
121
+ failed_status: str, hf_repo_results: str, local_dir_results: str):
122
+ """Checks if the currently running evals are completed, if yes, update their status on the hub."""
123
+ my_snapshot_download(repo_id=hf_repo_results, revision="main", local_dir=local_dir_results, repo_type="dataset", max_workers=60)
124
+
125
+ running_evals = get_eval_requests([checked_status], hf_repo=hf_repo, local_dir=local_dir)
126
+
127
+ for eval_request in running_evals:
128
+ model = eval_request.model
129
+ print("====================================")
130
+ print(f"Checking {model}")
131
+
132
+ output_path = model
133
+ output_file = f"{local_dir_results}/{output_path}/results*.json"
134
+ output_file_exists = len(glob.glob(output_file)) > 0
135
+
136
+ if output_file_exists:
137
+ print(f"EXISTS output file exists for {model} setting it to {completed_status}")
138
+ set_eval_request(api, eval_request, completed_status, hf_repo, local_dir)
139
+
140
+
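
Note: a hedged sketch of a queued request as the backend sees it and of the lm-eval model_args string that get_model_args() builds for it; requested_tasks is passed as plain dicts, matching how get_user_requested_task_names() indexes them, and all values are illustrative:

from src.backend.manage_requests import EvalRequest

req = EvalRequest(
    model="EleutherAI/pythia-70m",
    requested_tasks=[{"benchmark": "pubmedqa", "col_name": "PubMedQA", "num_fewshot": 0}],
    private=False,
    status="PENDING",
    json_filepath="",
    precision="float32",
)
print(req.get_user_requested_task_names())  # ['pubmedqa']
print(req.get_model_args())
# pretrained=EleutherAI/pythia-70m,revision=main,parallelize=True,dtype=float32
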
src/backend/run_eval_suite.py ADDED
@@ -0,0 +1,75 @@
1
+ from lm_eval import tasks, evaluator, utils
2
+ from lm_eval.tasks import initialize_tasks, TaskManager
3
+
4
+ try:
5
+ from lm_eval.tasks import include_task_folder
6
+ except ImportError:
7
+ from lm_eval.tasks import include_path
8
+
9
+ from src.backend.manage_requests import EvalRequest
10
+
11
+ # from src.backend.tasks.xsum.task import XSum
12
+ # from src.backend.tasks.xsum.task_v2 import XSumv2
13
+
14
+ # from src.backend.tasks.cnndm.task import CNNDM
15
+ # from src.backend.tasks.cnndm.task_v2 import CNNDMv2
16
+
17
+ # from src.backend.tasks.selfcheckgpt.task import SelfCheckGpt
18
+
19
+
20
+
21
+ def run_evaluation(eval_request: EvalRequest, task_names, num_fewshot, batch_size, device, use_cache=None, limit=None, max_nb_samples=100) -> dict:
22
+ if limit:
23
+ print("WARNING: --limit SHOULD ONLY BE USED FOR TESTING. REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.")
24
+
25
+
26
+ # try:
27
+ # include_task_folder("src/backend/tasks/")
28
+ # except:
29
+ # include_path("src/backend/tasks")
30
+
31
+ # initialize_tasks('INFO')
32
+ # https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/interface.md#external-library-usage
33
+ # indexes all tasks from the `lm_eval/tasks` subdirectory.
34
+ # Alternatively, you can set `TaskManager(include_path="path/to/my/custom/task/configs")`
35
+ # to include a set of tasks in a separate directory.
36
+ task_manager = TaskManager(include_path="src/backend/probing_tasks")
37
+
38
+ if "gpt" in eval_request.model:
39
+ model = "openai-chat-completions"
40
+ else:
41
+ model = "hf-auto"
42
+
43
+ print(f"Considered Tasks (after overriding): {task_names}")
44
+
45
+ print(f"model_args: {eval_request.get_model_args()}")
46
+
47
+ results = evaluator.simple_evaluate(model=model, # "hf-causal-experimental", # "hf-causal" how can i make this work for
48
+ model_args=eval_request.get_model_args(),
49
+ task_manager=task_manager,
50
+ tasks=task_names,
51
+ num_fewshot=num_fewshot,
52
+ batch_size=batch_size,
53
+ max_batch_size=8,
54
+ device=device,
55
+ use_cache=use_cache,
56
+ limit=limit,
57
+
58
+ # task_manager=task_manager,
59
+ # include_path="/Users/chaeeunlee/Documents/VSC_workspaces/biomed_probing_leaderboard/src/backend/tasks",
60
+ write_out=True)
61
+
62
+ results["config"]["model_dtype"] = eval_request.precision
63
+ results["config"]["model_name"] = eval_request.model
64
+ results["config"]["model_sha"] = eval_request.revision
65
+
66
+ if max_nb_samples is not None:
67
+ if 'samples' in results:
68
+ samples = results['samples']
69
+ for task_name in samples.keys():
70
+ if len(samples[task_name]) > max_nb_samples:
71
+ results['samples'][task_name] = results['samples'][task_name][:max_nb_samples]
72
+
73
+ # print(evaluator.make_table(results))
74
+
75
+ return results
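
Note: a minimal sketch of the harness call that run_evaluation() wraps, in the lm-eval 0.4-style API used above; the model, task, and device are placeholders, and the small limit is for smoke-testing only:

from lm_eval import evaluator
from lm_eval.tasks import TaskManager

task_manager = TaskManager(include_path="src/backend/probing_tasks")
results = evaluator.simple_evaluate(
    model="hf-auto",
    model_args="pretrained=EleutherAI/pythia-70m,revision=main,dtype=float32",
    tasks=["pubmedqa"],
    num_fewshot=0,
    batch_size="auto",
    device="cuda",
    limit=8,            # smoke test only; real runs use limit=None
    task_manager=task_manager,
    write_out=True,
)
print(results["results"])
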
src/backend/sort_queue.py ADDED
@@ -0,0 +1,28 @@
1
+ from dataclasses import dataclass
2
+ from huggingface_hub import HfApi
3
+ from src.backend.manage_requests import EvalRequest
4
+
5
+
6
+ @dataclass
7
+ class ModelMetadata:
8
+ likes: int = 0
9
+ size: int = 15
10
+
11
+
12
+ def sort_models_by_priority(api: HfApi, models: list[EvalRequest]) -> list[EvalRequest]:
13
+ private_models = [model for model in models if model.private]
14
+ public_models = [model for model in models if not model.private]
15
+
16
+ return sort_by_submit_date(private_models) + sort_by_submit_date(public_models)
17
+
18
+
19
+ def sort_by_submit_date(eval_requests: list[EvalRequest]) -> list[EvalRequest]:
20
+ return sorted(eval_requests, key=lambda x: x.submitted_time, reverse=False)
21
+
22
+
23
+ def sort_by_size(eval_requests: list[EvalRequest]) -> list[EvalRequest]:
24
+ return sorted(eval_requests, key=lambda x: x.params, reverse=False)
25
+
26
+
27
+ def sort_by_likes(eval_requests: list[EvalRequest]) -> list[EvalRequest]:
28
+ return sorted(eval_requests, key=lambda x: x.likes, reverse=False)
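
Note: a tiny illustration of the queue order produced by sort_models_by_priority() above (private requests first, then public, each oldest submission first); the EvalRequest values are made up:

from src.backend.manage_requests import EvalRequest
from src.backend.sort_queue import sort_models_by_priority

reqs = [
    EvalRequest(model="org/pub-late", requested_tasks=[], private=False, status="PENDING",
                json_filepath="", submitted_time="2024-02-02T00:00:00"),
    EvalRequest(model="org/private", requested_tasks=[], private=True, status="PENDING",
                json_filepath="", submitted_time="2024-02-03T00:00:00"),
    EvalRequest(model="org/pub-early", requested_tasks=[], private=False, status="PENDING",
                json_filepath="", submitted_time="2024-02-01T00:00:00"),
]
print([r.model for r in sort_models_by_priority(api=None, models=reqs)])
# ['org/private', 'org/pub-early', 'org/pub-late']
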
src/display/about.py ADDED
@@ -0,0 +1,124 @@
1
+ from src.display.utils import ModelType
2
+
3
+ TITLE = """<h1 align="center" id="space-title">🧬 Biomedical Knowledge Probing Leaderboard 🧬</h1>"""
4
+
5
+ INTRODUCTION_TEXT = """
6
+ 📐 This leaderboard aims to track, rank and evaluate biomedical factual knowledge probing results in LLMs.
7
+ """
8
+
9
+ # Submit a model for automated evaluation on the [Edinburgh International Data Facility](https://www.epcc.ed.ac.uk/hpc-services/edinburgh-international-data-facility) (EIDF) GPU cluster on the "Submit" page.
10
+ # The leaderboard's backend runs the great [Eleuther AI Language Model Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) - more details in the "About" page.
11
+ # """
12
+
13
+ # About Tab
14
+ LLM_BENCHMARKS_TEXT = f"""
15
+ # Context
16
+ As large language models (LLMs) get better at creating believable texts, addressing hallucinations in LLMs becomes increasingly important. In this exciting time where numerous LLMs are released every week, it can be challenging to identify the leading model, particularly in terms of their reliability against hallucination. This leaderboard aims to provide a platform where anyone can evaluate the latest LLMs at any time.
17
+
18
+ # How it works
19
+ 📈 We evaluate the models on 19 hallucination benchmarks spanning from open-ended to close-ended generation using the <a href="https://github.com/EleutherAI/lm-evaluation-harness" target="_blank"> Eleuther AI Language Model Evaluation Harness </a>, a unified framework to test generative language models on a large number of different evaluation tasks.
20
+ """
21
+ LLM_BENCHMARKS_DETAILS = f"""
22
+
23
+ ### Question Answering
24
+ - <a href="https://aclanthology.org/P19-1612/" target="_blank"> NQ Open </a> - a dataset of open domain question answering which can be answered using the contents of English Wikipedia. 64-shot setup.
25
+ - <a href="https://aclanthology.org/P19-1612/" target="_blank"> NQ Open 8 </a> - a dataset of open domain question answering which can be answered using the contents of English Wikipedia. 8-shot setup.
26
+ - <a href="https://aclanthology.org/2022.acl-long.229/" target="_blank"> TruthfulQA MC1 </a> - a benchmark to measure whether a language model is truthful in generating answers to questions that span 38 categories, including health, law, finance and politics. Questions are crafted so that some humans would answer falsely due to a false belief or misconception. To perform well, models must avoid generating false answers learned from imitating human texts. **MC1 denotes that there is a single correct label**.
27
+ - <a href="https://aclanthology.org/2022.acl-long.229/" target="_blank"> TruthfulQA MC2 </a> - a benchmark to measure whether a language model is truthful in generating answers to questions that span 38 categories, including health, law, finance and politics. Questions are crafted so that some humans would answer falsely due to a false belief or misconception. To perform well, models must avoid generating false answers learned from imitating human texts. **MC2 denotes that there can be multiple correct labels**.
28
+ - <a href="https://aclanthology.org/2023.emnlp-main.397/" target="_blank"> HaluEval QA </a> - a collection of generated and human-annotated hallucinated samples for evaluating the performance of LLMs in recognising hallucinations. **QA denotes the question answering task**.
29
+ - <a href="https://aclanthology.org/D16-1264/" target="_blank"> SQuADv2 </a> - a combination of 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones. To do well on SQuAD2.0, systems must not only answer questions when possible, but also determine when no answer is supported by the paragraph and abstain from answering.
30
+
31
+ ### Reading Comprehension
32
+ - <a href="https://aclanthology.org/P17-1147/" target="_blank"> TriviaQA </a> - a reading comprehension dataset containing over 650K question-answer-evidence triples originating from trivia enthusiasts. 64-shot setup.
33
+ - <a href="https://aclanthology.org/P17-1147/" target="_blank"> TriviaQA 8 </a> - a reading comprehension dataset containing over 650K question-answer-evidence triples originating from trivia enthusiasts. 8-shot setup.
34
+ - <a href="https://aclanthology.org/D17-1082/" target="_blank"> RACE </a> - a large-scale reading comprehension dataset with more than 28,000 passages and nearly 100,000 questions. The dataset is collected from English examinations in China, which are designed for middle school and high school students.
35
+
36
+ ### Summarisation
37
+ - <a href="https://aclanthology.org/2023.emnlp-main.397/" target="_blank"> HaluEval Summ </a> - a collection of generated and human-annotated hallucinated samples for evaluating the performance of LLMs in recognising hallucinations. **Summ denotes the summarisation task**.
38
+ - <a href="https://aclanthology.org/2020.acl-main.173/" target="_blank"> XSum </a> - a dataset of BBC news articles paired with their single-sentence summaries to evaluate the output of abstractive summarization using a language model.
39
+ - <a href="https://arxiv.org/abs/1704.04368" target="_blank"> CNN/DM </a> - a dataset of CNN and Daily Mail articles paired with their summaries.
40
+
41
+ ### Dialogue
42
+ - <a href="https://aclanthology.org/2023.emnlp-main.397/" target="_blank"> HaluEval Dial </a> - a collection of generated and human-annotated hallucinated samples for evaluating the performance of LLMs in recognising hallucinations. **Dial denotes the knowledge-grounded dialogue task**.
43
+ - <a href="https://aclanthology.org/2022.tacl-1.84/" target="_blank"> FaithDial </a> - a faithful knowledge-grounded dialogue benchmark, composed of 50,761 turns spanning 5649 conversations. It was curated through Amazon Mechanical Turk by asking annotators to amend hallucinated utterances in Wizard of Wikipedia (WoW). In our dialogue setting, we simulate interactions between two speakers: an information seeker and a bot wizard. The seeker has a large degree of freedom as opposed to the wizard bot which is more restricted on what it can communicate.
44
+
45
+ ### Fact Check
46
+ - <a href="https://github.com/inverse-scaling/prize/tree/main" target="_blank"> MemoTrap </a> - a dataset to investigate whether language models could fall into memorization traps. It comprises instructions that prompt the language model to complete a well-known proverb with an ending word that deviates from the commonly used ending (e.g., Write a quote that ends in the word “early”: Better late than ).
47
+ - <a href="https://arxiv.org/abs/2303.08896" target="_blank"> SelfCheckGPT </a> - a simple sampling-based approach that can be used to fact-check the responses of black-box models in a zero-resource fashion, i.e. without an external database. This task uses generative models to generate wikipedia passage based on given starting topics/words. Then generated passages are measured by [selfcheckgpt](https://github.com/potsawee/selfcheckgpt).
48
+ - <a href="https://arxiv.org/abs/1803.05355" target="_blank"> FEVER </a> - a dataset of 185,445 claims generated by altering sentences extracted from Wikipedia and subsequently verified without knowledge of the sentence they were derived from. The claims are classified as Supported, Refuted or NotEnoughInfo. For the first two classes, the annotators also recorded the sentence(s) forming the necessary evidence for their judgment.
49
+ - <a href="https://aclanthology.org/2023.findings-emnlp.68/" target="_blank"> TrueFalse </a> - a dataset of true and false statements. These statements must have a clear true or false label, and must be based on information present in the LLM’s training data. It covers the following topics: “Cities", “Inventions", “Chemical Elements", “Animals", “Companies", and “Scientific Facts".
50
+
51
+ ### Instruction following
52
+ - <a href="https://arxiv.org/abs/2311.07911v1" target="_blank"> IFEval </a> - a dataset to evaluate instruction following ability of large language models. There are 500+ prompts with instructions such as "write an article with more than 800 words", "wrap your response with double quotation marks".
53
+
54
+ # Details and logs
55
+ - Detailed results are available in the `results` dataset: https://huggingface.co/datasets/hallucinations-leaderboard/results/tree/main
56
+ - You can find details on the inputs/outputs for the models in the `details` of each model, which you can access by clicking the 📄 emoji after the model name.
57
+
58
+ # Reproducibility
59
+ To reproduce our results, here are the commands you can run, using [this script](https://huggingface.co/spaces/hallucinations-leaderboard/leaderboard/blob/main/backend-cli.py): `python backend-cli.py`.
60
+
61
+ Alternatively, if you're interested in evaluating a specific task with a particular model, you can use [this script](https://github.com/EleutherAI/lm-evaluation-harness/tree/b281b0921b636bc36ad05c0b0b0763bd6dd43463) of the Eleuther AI Harness:
62
+ `python main.py --model=hf-causal-experimental --model_args="pretrained=<your_model>,revision=<your_model_revision>"`
63
+ ` --tasks=<task_list> --num_fewshot=<n_few_shot> --batch_size=1 --output_path=<output_path>` (Note that you may need to add tasks from [here](https://huggingface.co/spaces/hallucinations-leaderboard/leaderboard/tree/main/src/backend/tasks) to [this folder](https://github.com/EleutherAI/lm-evaluation-harness/tree/b281b0921b636bc36ad05c0b0b0763bd6dd43463/lm_eval/tasks))
64
+
65
+ The total batch size we get for models which fit on one A100 node is 8 (8 GPUs * 1). If you don't use parallelism, adapt your batch size to fit. You can expect results to vary slightly for different batch sizes because of padding.
66
+
67
+ The tasks and few-shot parameters are:
68
+
69
+ - <a href="https://aclanthology.org/P19-1612/" target="_blank"> NQ Open </a> (`nq_open`): 64-shot (`exact_match`)
70
+ - <a href="https://aclanthology.org/P19-1612/" target="_blank"> NQ Open 8 </a> (`nq8`): 8-shot (`exact_match`)
71
+ - <a href="https://aclanthology.org/P17-1147/" target="_blank"> TriviaQA </a> (`triviaqa`): 64-shot (`exact_match`)
72
+ - <a href="https://aclanthology.org/P17-1147/" target="_blank"> TriviaQA 8 </a> (`tqa8`): 8-shot (`exact_match`)
73
+ - <a href="https://aclanthology.org/2022.acl-long.229/" target="_blank"> TruthfulQA MC1 </a> (`truthfulqa_mc1`): 0-shot (`acc`)
74
+ - <a href="https://aclanthology.org/2022.acl-long.229/" target="_blank"> TruthfulQA MC2 </a> (`truthfulqa_mc2`): 0-shot (`acc`)
75
+ - <a href="https://aclanthology.org/2023.emnlp-main.397/" target="_blank"> HaluEval QA </a> (`halueval_qa`): 0-shot (`em`)
76
+ - <a href="https://aclanthology.org/2023.emnlp-main.397/" target="_blank"> HaluEval Summ </a> (`halueval_summarization`): 0-shot (`em`)
77
+ - <a href="https://aclanthology.org/2023.emnlp-main.397/" target="_blank"> HaluEval Dial </a> (`halueval_dialogue`): 0-shot (`em`)
78
+ - <a href="https://aclanthology.org/2020.acl-main.173/" target="_blank"> XSum </a> (`xsum`): 2-shot (`rougeLsum`)
79
+ - <a href="https://arxiv.org/abs/1704.04368" target="_blank"> CNN/DM </a> (`cnndm`): 2-shot (`rougeLsum`)
80
+ - <a href="https://github.com/inverse-scaling/prize/tree/main" target="_blank"> MemoTrap </a> (`trap`): 0-shot (`acc`)
81
+ - <a href="https://arxiv.org/abs/2311.07911v1" target="_blank"> IFEval </a> (`ifeval`): 0-shot (`prompt_level_strict_acc`)
82
+ - <a href="https://arxiv.org/abs/2303.08896" target="_blank"> SelfCheckGPT </a> (`selfcheckgpt`): 0 (-)
83
+ - <a href="https://arxiv.org/abs/1803.05355" target="_blank"> FEVER </a> (`fever10`): 16-shot (`acc`)
84
+ - <a href="https://aclanthology.org/D16-1264/" target="_blank"> SQuADv2 </a> (`squadv2`): 4-shot (`squad_v2`)
85
+ - <a href="https://aclanthology.org/2023.findings-emnlp.68/" target="_blank"> TrueFalse </a> (`truefalse_cieacf`): 8-shot (`acc`)
86
+ - <a href="https://aclanthology.org/2022.tacl-1.84/" target="_blank"> FaithDial </a> (`faithdial_hallu`): 8-shot (`acc`)
87
+ - <a href="https://aclanthology.org/D17-1082/" target="_blank"> RACE </a> (`race`): 0-shot (`acc`)
88
+
89
+ For all these evaluations, a higher score is a better score.
90
+
91
+ ## Icons
92
+ - {ModelType.PT.to_str(" : ")} model: new, base models, trained on a given corpus
93
+ - {ModelType.FT.to_str(" : ")} model: pretrained models finetuned on more data
94
+ Specific fine-tune subcategories (more adapted to chat):
95
+ - {ModelType.IFT.to_str(" : ")} model: instruction fine-tunes, which are models fine-tuned specifically on datasets of task instructions
96
+ - {ModelType.RL.to_str(" : ")} model: reinforcement fine-tunes, which usually change the model loss a bit with an added policy.
97
+ If there is no icon, we have not uploaded the information on the model yet; feel free to open an issue with the model information!
98
+ """
99
+
100
+ FAQ_TEXT = """
101
+ ---------------------------
102
+ # FAQ
103
+ ## 1) Submitting a model
104
+ XXX
105
+ ## 2) Model results
106
+ XXX
107
+ ## 3) Editing a submission
108
+ XXX
109
+ """
110
+
111
+ EVALUATION_QUEUE_TEXT = """
112
+ XXX
113
+ """
114
+
115
+ CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
116
+ CITATION_BUTTON_TEXT = r"""
117
+ @misc{hallucinations-leaderboard,
118
+ author = {Pasquale Minervini et al.},
119
+ title = {Hallucinations Leaderboard},
120
+ year = {2023},
121
+ publisher = {Hugging Face},
122
+ howpublished = "\url{https://huggingface.co/spaces/hallucinations-leaderboard/leaderboard}"
123
+ }
124
+ """
src/display/css_html_js.py ADDED
@@ -0,0 +1,115 @@
1
+ custom_css = """
2
+
3
+ .gradio-container {
4
+ max-width: 100%!important;
5
+ }
6
+
7
+ .markdown-text {
8
+ font-size: 16px !important;
9
+ }
10
+
11
+ #models-to-add-text {
12
+ font-size: 18px !important;
13
+ }
14
+
15
+ #citation-button span {
16
+ font-size: 16px !important;
17
+ }
18
+
19
+ #citation-button textarea {
20
+ font-size: 16px !important;
21
+ }
22
+
23
+ #citation-button > label > button {
24
+ margin: 6px;
25
+ transform: scale(1.3);
26
+ }
27
+
28
+ #leaderboard-table {
29
+ margin-top: 15px
30
+ }
31
+
32
+ #leaderboard-table-lite {
33
+ margin-top: 15px
34
+ }
35
+
36
+ #search-bar-table-box > div:first-child {
37
+ background: none;
38
+ border: none;
39
+ }
40
+
41
+ #search-bar {
42
+ padding: 0px;
43
+ }
44
+
45
+ /* Hides the final AutoEvalColumn */
46
+ #llm-benchmark-tab-table table td:last-child,
47
+ #llm-benchmark-tab-table table th:last-child {
48
+ display: none;
49
+ }
50
+
51
+ /* Limit the width of the first AutoEvalColumn so that names don't expand too much */
52
+ table td:first-child,
53
+ table th:first-child {
54
+ max-width: 400px;
55
+ overflow: auto;
56
+ white-space: nowrap;
57
+ }
58
+
59
+ .tab-buttons button {
60
+ font-size: 20px;
61
+ }
62
+
63
+ #scale-logo {
64
+ border-style: none !important;
65
+ box-shadow: none;
66
+ display: block;
67
+ margin-left: auto;
68
+ margin-right: auto;
69
+ max-width: 600px;
70
+ }
71
+
72
+ #scale-logo .download {
73
+ display: none;
74
+ }
75
+ #filter_type{
76
+ border: 0;
77
+ padding-left: 0;
78
+ padding-top: 0;
79
+ }
80
+ #filter_type label {
81
+ display: flex;
82
+ }
83
+ #filter_type label > span{
84
+ margin-top: var(--spacing-lg);
85
+ margin-right: 0.5em;
86
+ }
87
+ #filter_type label > .wrap{
88
+ width: 103px;
89
+ }
90
+ #filter_type label > .wrap .wrap-inner{
91
+ padding: 2px;
92
+ }
93
+ #filter_type label > .wrap .wrap-inner input{
94
+ width: 1px
95
+ }
96
+ #filter-columns-type{
97
+ border:0;
98
+ padding:0.5;
99
+ }
100
+ #filter-columns-size{
101
+ border:0;
102
+ padding:0.5;
103
+ }
104
+ #box-filter > .form{
105
+ border: 0
106
+ }
107
+ """
108
+
109
+ get_window_url_params = """
110
+ function(url_params) {
111
+ const params = new URLSearchParams(window.location.search);
112
+ url_params = Object.fromEntries(params);
113
+ return url_params;
114
+ }
115
+ """
src/display/formatting.py ADDED
@@ -0,0 +1,42 @@
1
+ import os
2
+ from datetime import datetime, timezone
3
+
4
+ from huggingface_hub import HfApi
5
+ from huggingface_hub.hf_api import ModelInfo
6
+
7
+
8
+ API = HfApi()
9
+
10
+
11
+ def model_hyperlink(link, model_name):
12
+ return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
13
+
14
+
15
+ def make_clickable_model(model_name):
16
+ link = f"https://huggingface.co/{model_name}"
17
+
18
+ # details_model_name = model_name.replace("/", "__")
19
+ # details_link = f"https://huggingface.co/datasets/open-llm-leaderboard/details_{details_model_name}"
20
+
21
+ # return model_hyperlink(link, model_name) + " " + model_hyperlink(details_link, "📑")
22
+ return model_hyperlink(link, model_name)
23
+
24
+
25
+ def styled_error(error):
26
+ return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
27
+
28
+
29
+ def styled_warning(warn):
30
+ return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
31
+
32
+
33
+ def styled_message(message):
34
+ return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"
35
+
36
+
37
+ def has_no_nan_values(df, columns):
38
+ return df[columns].notna().all(axis=1)
39
+
40
+
41
+ def has_nan_values(df, columns):
42
+ return df[columns].isna().any(axis=1)
src/display/utils.py ADDED
@@ -0,0 +1,161 @@
1
+ from dataclasses import dataclass, make_dataclass
2
+ from enum import Enum
3
+
4
+ import pandas as pd
5
+
6
+
7
+ def fields(raw_class):
8
+ return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
9
+
10
+
11
+ @dataclass
12
+ class Task:
13
+ benchmark: str
14
+ metric: str
15
+ col_name: str
16
+
17
+
18
+ class Tasks(Enum):
19
+
20
+ # medqa = Task("medqa", "acc_norm", "MedQA") # medqa_4options?
21
+ # am i just manually going to include everything? hmm for display, idk how easily do i want to be able to tick this on and off?
22
+ # where does the acc_norm come from
23
+ medmcqa = Task("medmcqa", "acc_norm", "MedMCQA")
24
+ pubmedqa = Task("pubmedqa", "acc", "PubMedQA")
25
+ # task2 = Task("pubmedqa_no_context", "PubMedQA_no_context", 0)
26
+ pubmedqa_no_context = Task("pubmedqa_no_context", "acc", "PubMedQA_no_context") # adding this throws an error. -> value=leaderboard_df[
27
+ biolama_umls = Task("biolama_umls", "acc", "BioLAMA-UMLS")
28
+
29
+ # These classes are for user facing column names,
30
+ # to avoid having to change them all around the code
31
+ # when a modif is needed
32
+ @dataclass
33
+ class ColumnContent:
34
+ name: str
35
+ type: str
36
+ displayed_by_default: bool
37
+ hidden: bool = False
38
+ never_hidden: bool = False
39
+ dummy: bool = False
40
+ is_task: bool = False
41
+
42
+ auto_eval_column_dict = []
43
+ # Init
44
+ auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
45
+ auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
46
+ #Scores
47
+ auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Avg", "number", True)])
48
+ for task in Tasks:
49
+ auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True, is_task=True)]) # hidden was true by default
50
+ # Model information
51
+ auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
52
+ auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
53
+ auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
54
+ auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
55
+ auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
56
+ auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
57
+ auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
58
+ auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
59
+ auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
60
+ # Dummy column for the search bar (hidden by the custom CSS)
61
+ auto_eval_column_dict.append(["dummy", ColumnContent, ColumnContent("model_name_for_query", "str", False, dummy=True)])
62
+
63
+ # We use make dataclass to dynamically fill the scores from Tasks
64
+ AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
65
+
66
+
67
+ @dataclass(frozen=True)
68
+ class EvalQueueColumn: # Queue column
69
+ model = ColumnContent("model", "markdown", True)
70
+ revision = ColumnContent("revision", "str", True)
71
+ private = ColumnContent("private", "bool", True)
72
+ precision = ColumnContent("precision", "str", True)
73
+ weight_type = ColumnContent("weight_type", "str", "Original")
74
+ status = ColumnContent("status", "str", True)
75
+
76
+
77
+ @dataclass
78
+ class ModelDetails:
79
+ name: str
80
+ symbol: str = "" # emoji, only for the model type
81
+
82
+
83
+ class ModelType(Enum):
84
+ PT = ModelDetails(name="pretrained", symbol="🟢")
85
+ FT = ModelDetails(name="fine-tuned", symbol="🔶")
86
+ IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
87
+ RL = ModelDetails(name="RL-tuned", symbol="🟦")
88
+ Unknown = ModelDetails(name="", symbol="?")
89
+
90
+ def to_str(self, separator=" "):
91
+ return f"{self.value.symbol}{separator}{self.value.name}"
92
+
93
+ @staticmethod
94
+ def from_str(type):
95
+ if "fine-tuned" in type or "🔶" in type:
96
+ return ModelType.FT
97
+ if "pretrained" in type or "🟢" in type:
98
+ return ModelType.PT
99
+ if "RL-tuned" in type or "🟦" in type:
100
+ return ModelType.RL
101
+ if "instruction-tuned" in type or "⭕" in type:
102
+ return ModelType.IFT
103
+ return ModelType.Unknown
104
+
105
+
106
+ class WeightType(Enum):
107
+ Adapter = ModelDetails("Adapter")
108
+ Original = ModelDetails("Original")
109
+ Delta = ModelDetails("Delta")
110
+
111
+
112
+
113
+
114
+
115
+ class Precision(Enum):
116
+ float32 = ModelDetails("float32")
117
+ float16 = ModelDetails("float16")
118
+ bfloat16 = ModelDetails("bfloat16")
119
+ qt_8bit = ModelDetails("8bit")
120
+ qt_4bit = ModelDetails("4bit")
121
+ qt_GPTQ = ModelDetails("GPTQ")
122
+ Unknown = ModelDetails("?")
123
+
124
+ @staticmethod
125
+ def from_str(precision: str):
126
+ if precision in ["torch.float32", "float32"]:
127
+ return Precision.float32
128
+ if precision in ["torch.float16", "float16"]:
129
+ return Precision.float16
130
+ if precision in ["torch.bfloat16", "bfloat16"]:
131
+ return Precision.bfloat16
132
+ if precision in ["8bit"]:
133
+ return Precision.qt_8bit
134
+ if precision in ["4bit"]:
135
+ return Precision.qt_4bit
136
+ if precision in ["GPTQ", "None"]:
137
+ return Precision.qt_GPTQ
138
+ return Precision.Unknown
139
+
140
+
141
+ # Column selection
142
+ COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
143
+ TYPES = [c.type for c in fields(AutoEvalColumn) if not c.hidden]
144
+ COLS_LITE = [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]
145
+ TYPES_LITE = [c.type for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]
146
+
147
+ EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
148
+ EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
149
+
150
+ BENCHMARK_COLS = [t.value.col_name for t in Tasks]
151
+
152
+ NUMERIC_INTERVALS = {
153
+ "?": pd.Interval(-1, 0, closed="right"),
154
+ "~1.5": pd.Interval(0, 2, closed="right"),
155
+ "~3": pd.Interval(2, 4, closed="right"),
156
+ "~7": pd.Interval(4, 9, closed="right"),
157
+ "~13": pd.Interval(9, 20, closed="right"),
158
+ "~35": pd.Interval(20, 45, closed="right"),
159
+ "~60": pd.Interval(45, 70, closed="right"),
160
+ "70+": pd.Interval(70, 10000, closed="right"),
161
+ }
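
For contributors wondering how a new probing benchmark gets wired in, here is a minimal, hedged sketch of extending the `Tasks` enum above; the `my_new_probe` task name, metric, and column label are made-up placeholders, not tasks from this commit.

```python
# Hypothetical sketch: registering one extra probing task in src/display/utils.py.
from dataclasses import dataclass
from enum import Enum

@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str

class Tasks(Enum):
    medmcqa = Task("medmcqa", "acc_norm", "MedMCQA")
    pubmedqa = Task("pubmedqa", "acc", "PubMedQA")
    # New entry: once added here, it is picked up automatically by the
    # AutoEvalColumn construction loop and by BENCHMARK_COLS.
    my_new_probe = Task("my_new_probe", "acc", "MyNewProbe")

BENCHMARK_COLS = [t.value.col_name for t in Tasks]
print(BENCHMARK_COLS)  # ['MedMCQA', 'PubMedQA', 'MyNewProbe']
```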
src/envs.py ADDED
@@ -0,0 +1,39 @@
1
+ import os
2
+
3
+ from huggingface_hub import HfApi
4
+
5
+
6
+ H4_TOKEN = os.environ.get("HF_SECRET", None)
7
+
8
+ # REPO_ID = "pminervini/hallucinations-leaderboard"
9
+ REPO_ID = "CDT-BMAI-GP/biomed_probing_leaderboard" # "chaeeunlee/test_leaderboard" # "hallucinations-leaderboard/leaderboard"
10
+
11
+ QUEUE_REPO = "chaeeunlee/test_requests"
12
+ RESULTS_REPO = "chaeeunlee/test_results"
13
+
14
+ # have not created these repos yet
15
+ PRIVATE_QUEUE_REPO = "chaeeunlee/test_private-requests"
16
+ PRIVATE_RESULTS_REPO = "chaeeunlee/test_private-results"
17
+
18
+ IS_PUBLIC = bool(os.environ.get("IS_PUBLIC", True))
19
+
20
+ # CACHE_PATH = "/Users/chaeeunlee/Documents/VSC_workspaces/test_leaderboard" #
21
+ CACHE_PATH = os.getenv("HF_HOME", ".")
22
+
23
+ print(f"CACHE_PATH = {CACHE_PATH}")
24
+
25
+ EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
26
+ EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
27
+
28
+ EVAL_REQUESTS_PATH_PRIVATE = "eval-queue-private"
29
+ EVAL_RESULTS_PATH_PRIVATE = "eval-results-private"
30
+
31
+ # PATH_TO_COLLECTION = "hallucinations-leaderboard/llm-leaderboard-best-models-652d6c7965a4619fb5c27a03" # ??
32
+
33
+ # Rate limit variables
34
+ RATE_LIMIT_PERIOD = 7
35
+ RATE_LIMIT_QUOTA = 5
36
+ HAS_HIGHER_RATE_LIMIT = ["TheBloke"]
37
+
38
+ API = HfApi(token=H4_TOKEN)
39
+ # API = HfApi()
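
As a quick reference, this is how the environment variables read by `src/envs.py` could be set before launching the Space locally; the token and path values below are placeholders, not real credentials.

```python
# Hedged sketch: configuring src/envs.py via environment variables (placeholder values).
import os

os.environ["HF_SECRET"] = "hf_xxx"   # token used for Hub API calls and Space restarts
os.environ["HF_HOME"] = "/data"      # cache root; eval-queue/ and eval-results/ live under it
os.environ["IS_PUBLIC"] = "True"

from src.envs import EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH
print(EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH)  # /data/eval-queue /data/eval-results
```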
src/leaderboard/filter_models.py ADDED
@@ -0,0 +1,50 @@
1
+ from src.display.formatting import model_hyperlink
2
+ from src.display.utils import AutoEvalColumn
3
+
4
+ # Models which have been flagged by users as being problematic for a reason or another
5
+ # (Model name to forum discussion link)
6
+ FLAGGED_MODELS = {
7
+ "Voicelab/trurl-2-13b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/202",
8
+ "deepnight-research/llama-2-70B-inst": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/207",
9
+ "Aspik101/trurl-2-13b-pl-instruct_unload": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/213",
10
+ "Fredithefish/ReasonixPajama-3B-HF": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/236",
11
+ "TigerResearch/tigerbot-7b-sft-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/237",
12
+ "gaodrew/gaodrew-gorgonzola-13b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/215",
13
+ "AIDC-ai-business/Marcoroni-70B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287",
14
+ "AIDC-ai-business/Marcoroni-13B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287",
15
+ "AIDC-ai-business/Marcoroni-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287",
16
+ }
17
+
18
+ # Models which have been requested by orgs to not be submitted on the leaderboard
19
+ DO_NOT_SUBMIT_MODELS = [
20
+ "Voicelab/trurl-2-13b", # trained on MMLU
21
+ ]
22
+
23
+
24
+ def flag_models(leaderboard_data: list[dict]):
25
+ for model_data in leaderboard_data:
26
+ if model_data["model_name_for_query"] in FLAGGED_MODELS:
27
+ issue_num = FLAGGED_MODELS[model_data["model_name_for_query"]].split("/")[-1]
28
+ issue_link = model_hyperlink(
29
+ FLAGGED_MODELS[model_data["model_name_for_query"]],
30
+ f"See discussion #{issue_num}",
31
+ )
32
+ model_data[
33
+ AutoEvalColumn.model.name
34
+ ] = f"{model_data[AutoEvalColumn.model.name]} has been flagged! {issue_link}"
35
+
36
+
37
+ def remove_forbidden_models(leaderboard_data: list[dict]):
38
+ indices_to_remove = []
39
+ for ix, model in enumerate(leaderboard_data):
40
+ if model["model_name_for_query"] in DO_NOT_SUBMIT_MODELS:
41
+ indices_to_remove.append(ix)
42
+
43
+ for ix in reversed(indices_to_remove):
44
+ leaderboard_data.pop(ix)
45
+ return leaderboard_data
46
+
47
+
48
+ def filter_models(leaderboard_data: list[dict]):
49
+ leaderboard_data = remove_forbidden_models(leaderboard_data)
50
+ flag_models(leaderboard_data)
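
A small, hedged example of how these helpers are meant to be applied to the leaderboard records; the first model name is a placeholder, the second is taken from `DO_NOT_SUBMIT_MODELS` above.

```python
# Sketch: filter_models mutates the list of leaderboard records in place.
from src.leaderboard.filter_models import filter_models

leaderboard_data = [
    {"model_name_for_query": "some-org/some-model", "Model": "some-org/some-model"},
    {"model_name_for_query": "Voicelab/trurl-2-13b", "Model": "Voicelab/trurl-2-13b"},
]

filter_models(leaderboard_data)
# The Voicelab entry is dropped (forbidden list); any model in FLAGGED_MODELS
# would instead get a "has been flagged!" note appended to its "Model" cell.
print(len(leaderboard_data))  # 1
```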
src/leaderboard/read_evals.py ADDED
@@ -0,0 +1,226 @@
1
+ import glob
2
+ import json
3
+ import os
4
+ from dataclasses import dataclass
5
+
6
+ import dateutil
7
+ import numpy as np
8
+
9
+ from src.display.formatting import make_clickable_model
10
+ from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
11
+ from src.submission.check_validity import is_model_on_hub
12
+
13
+
14
+ @dataclass
15
+ class EvalResult:
16
+ # Also see src.display.utils.AutoEvalColumn for what will be displayed.
17
+ eval_name: str # org_model_precision (uid)
18
+ full_model: str # org/model (path on hub)
19
+ org: str
20
+ model: str
21
+ revision: str # commit hash, "" if main
22
+ results: dict
23
+ precision: Precision = Precision.Unknown
24
+ model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
25
+ weight_type: WeightType = WeightType.Original # Original or Adapter
26
+ architecture: str = "Unknown" # From config file
27
+ license: str = "?"
28
+ likes: int = 0
29
+ num_params: int = 0
30
+ date: str = "" # submission date of request file
31
+ still_on_hub: bool = False
32
+
33
+ @staticmethod
34
+ def init_from_json_file(json_filepath, is_backend: bool = False):
35
+ """Inits the result from the specific model result file"""
36
+ with open(json_filepath) as fp:
37
+ data = json.load(fp)
38
+
39
+ # We manage the legacy config format
40
+ config = data.get("config", data.get("config_general", None))
41
+
42
+ # Precision
43
+ precision = Precision.from_str(config.get("model_dtype"))
44
+
45
+ # Get model and org
46
+ org_and_model = config.get("model_name", config.get("model_args", None))
47
+ org_and_model = org_and_model.split("/", 1)
48
+
49
+ if len(org_and_model) == 1:
50
+ org = None
51
+ model = org_and_model[0]
52
+ result_key = f"{model}_{precision.value.name}"
53
+ else:
54
+ org = org_and_model[0]
55
+ model = org_and_model[1]
56
+ result_key = f"{org}_{model}_{precision.value.name}"
57
+ full_model = "/".join(org_and_model)
58
+
59
+ still_on_hub, error, model_config = is_model_on_hub(full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False)
60
+ architecture = "?"
61
+ if model_config is not None:
62
+ architectures = getattr(model_config, "architectures", None)
63
+ if architectures:
64
+ architecture = ";".join(architectures)
65
+
66
+ # Extract results available in this file (some results are split in several files)
67
+ results = {}
68
+
69
+ task_iterator = Tasks
70
+ if is_backend is True:
71
+ from src.backend.envs import Tasks as BackendTasks
72
+ task_iterator = BackendTasks
73
+
74
+ for task in task_iterator:
75
+ task = task.value
76
+
77
+ def post_process_results(results: dict) -> dict:
78
+ # {'nq_open': {'em': 0.018005540166204988, 'em_stderr': 0.0022134216580395583}}
79
+ res_copy = results.copy()
80
+
81
+ for task_name in res_copy.keys():
82
+ entry_copy = results[task_name].copy()
83
+
84
+ for k, v in entry_copy.items():
85
+ if "exact_match" in k:
86
+ results[task_name][k.replace("exact_match", "em")] = v
87
+
88
+ entry_copy = results[task_name].copy()
89
+
90
+ for k, v in entry_copy.items():
91
+ if "," in k:
92
+ tokens = k.split(",")
93
+ results[task_name][tokens[0]] = v
94
+
95
+ return results
96
+
97
+ accs = np.array([v.get(task.metric, None) for k, v in post_process_results(data["results"]).items() if task.benchmark in k])
98
+
99
+ if accs.size == 0 or any([acc is None for acc in accs]):
100
+ continue
101
+
102
+ mean_acc = np.mean(accs) * 100.0
103
+ results[task.benchmark] = mean_acc
104
+
105
+ return EvalResult(eval_name=result_key, full_model=full_model, org=org, model=model, results=results,
106
+ precision=precision, revision=config.get("model_sha", ""), still_on_hub=still_on_hub,
107
+ architecture=architecture)
108
+
109
+ def update_with_request_file(self, requests_path):
110
+ """Finds the relevant request file for the current model and updates info with it"""
111
+ request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
112
+
113
+ try:
114
+ with open(request_file, "r") as f:
115
+ request = json.load(f)
116
+ self.model_type = ModelType.from_str(request.get("model_type", ""))
117
+ self.weight_type = WeightType[request.get("weight_type", "Original")]
118
+ self.license = request.get("license", "?")
119
+ self.likes = request.get("likes", 0)
120
+ self.num_params = request.get("params", 0)
121
+ self.date = request.get("submitted_time", "")
122
+ except Exception as e:
123
+ print(f"Could not find request file for {self.org}/{self.model} -- path: {requests_path} -- {e}")
124
+
125
+ def is_complete(self) -> bool:
126
+ for task in Tasks:
127
+ if task.value.benchmark not in self.results:
128
+ return False
129
+ return True
130
+
131
+ def to_dict(self):
132
+ """Converts the Eval Result to a dict compatible with our dataframe display"""
133
+ average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
134
+ data_dict = {
135
+ "eval_name": self.eval_name, # not a column, just a save name,
136
+ AutoEvalColumn.precision.name: self.precision.value.name,
137
+ AutoEvalColumn.model_type.name: self.model_type.value.name,
138
+ AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
139
+ AutoEvalColumn.weight_type.name: self.weight_type.value.name,
140
+ AutoEvalColumn.architecture.name: self.architecture,
141
+ AutoEvalColumn.model.name: make_clickable_model(self.full_model),
142
+ AutoEvalColumn.dummy.name: self.full_model,
143
+ AutoEvalColumn.revision.name: self.revision,
144
+ AutoEvalColumn.average.name: average,
145
+ AutoEvalColumn.license.name: self.license,
146
+ AutoEvalColumn.likes.name: self.likes,
147
+ AutoEvalColumn.params.name: self.num_params,
148
+ AutoEvalColumn.still_on_hub.name: self.still_on_hub,
149
+ }
150
+
151
+ for task in Tasks:
152
+ if task.value.benchmark in self.results: # XXX
153
+ data_dict[task.value.col_name] = self.results[task.value.benchmark]
154
+
155
+ return data_dict
156
+
157
+
158
+ def get_request_file_for_model(requests_path, model_name, precision):
159
+ """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED and RUNNING"""
160
+ request_files = os.path.join(
161
+ requests_path,
162
+ f"{model_name}_eval_request_*.json",
163
+ )
164
+ request_files = glob.glob(request_files)
165
+
166
+ # Select correct request file (precision)
167
+ request_file = ""
168
+ request_files = sorted(request_files, reverse=True)
169
+ # print('XXX', request_files)
170
+ for tmp_request_file in request_files:
171
+ with open(tmp_request_file, "r") as f:
172
+ req_content = json.load(f)
173
+ if (
174
+ # req_content["status"] in ["FINISHED", "RUNNING"] and
175
+ req_content["precision"] == precision.split(".")[-1]
176
+ ):
177
+ request_file = tmp_request_file
178
+ return request_file
179
+
180
+
181
+ def get_raw_eval_results(results_path: str, requests_path: str, is_backend: bool = False) -> list[EvalResult]:
182
+ """From the path of the results folder root, extract all needed info for results"""
183
+ model_result_filepaths = []
184
+
185
+ print(f"results_path: {results_path}")
186
+
187
+ walked_list = list(os.walk(results_path))
188
+ print(f"len(walked_list): {len(walked_list)}") # 1
189
+
190
+ for root, _, files in os.walk(results_path):
191
+ # We should only have json files in model results
192
+ if len(files) == 0 or any([not f.endswith(".json") for f in files]):
193
+ print("negative condition met")
194
+ continue
195
+
196
+ # Sort the files by date
197
+ try:
198
+ files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
199
+ except dateutil.parser._parser.ParserError:
200
+ files = [files[-1]]
201
+
202
+
203
+ print(f"files = {files}")
204
+
205
+ for file in files:
206
+ model_result_filepaths.append(os.path.join(root, file))
207
+
208
+ eval_results = {}
209
+ for model_result_filepath in model_result_filepaths:
210
+ # Creation of result
211
+ eval_result = EvalResult.init_from_json_file(model_result_filepath, is_backend=is_backend)
212
+ eval_result.update_with_request_file(requests_path)
213
+
214
+ # Store results of same eval together
215
+ eval_name = eval_result.eval_name
216
+ if eval_name in eval_results.keys():
217
+ eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
218
+ else:
219
+ eval_results[eval_name] = eval_result
220
+
221
+ results = []
222
+ for v in eval_results.values():
223
+ results.append(v)
224
+
225
+ print(f"results = {results}")
226
+ return results
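
To make the flow above concrete, here is a hedged sketch of how a single results file becomes a leaderboard row; the JSON path is a made-up placeholder.

```python
# Sketch: one results JSON -> EvalResult -> leaderboard row dict.
from src.envs import EVAL_REQUESTS_PATH
from src.leaderboard.read_evals import EvalResult

result = EvalResult.init_from_json_file("eval-results/some-org/results_2024-01-01.json")
result.update_with_request_file(EVAL_REQUESTS_PATH)  # fills model type, license, likes, params, date
row = result.to_dict()                               # keys follow AutoEvalColumn names
print(result.is_complete())                          # True only if every Task has a score
```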
src/populate.py ADDED
@@ -0,0 +1,89 @@
1
+ import json
2
+ import os
3
+
4
+ import pandas as pd
5
+
6
+ from src.display.formatting import has_no_nan_values, make_clickable_model
7
+ from src.display.utils import AutoEvalColumn, EvalQueueColumn
8
+ from src.leaderboard.filter_models import filter_models
9
+ from src.leaderboard.read_evals import get_raw_eval_results, EvalResult
10
+
11
+ '''
12
+ This function, get_leaderboard_df, is designed to read and process evaluation results from a specified results path and requests path,
13
+ ultimately producing a leaderboard in the form of a pandas DataFrame. The process involves several steps, including filtering, sorting,
14
+ and cleaning the data based on specific criteria. Let's break down the function step by step:
15
+
16
+ '''
17
+
18
+ ## TO-DO: if raw_data is [], return dummy df with correct columns so that the UI shows the right columns
19
+ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> tuple[list[EvalResult], pd.DataFrame]:
20
+
21
+ print(f"results_path = {results_path}")
22
+
23
+ raw_data = get_raw_eval_results(results_path, requests_path)
24
+
25
+ all_data_json = [v.to_dict() for v in raw_data] # if v.is_complete()]
26
+ # all_data_json.append(baseline_row)
27
+ filter_models(all_data_json)
28
+
29
+ print(f"all_data_json = {all_data_json}")
30
+
31
+ df = pd.DataFrame.from_records(all_data_json)
32
+
33
+ task_attributes = []
34
+
35
+ # Iterate over all attributes of AutoEvalColumn class
36
+ for attr_name in dir(AutoEvalColumn):
37
+ # Retrieve the attribute object
38
+ attr = getattr(AutoEvalColumn, attr_name)
39
+ # Check if the attribute has 'is_task' attribute and it is True
40
+ if hasattr(attr, 'is_task') and getattr(attr, 'is_task'):
41
+ task_attributes.append(attr)
42
+
43
+ # Now task_attributes contains all attributes where is_task=True
44
+ # print(task_attributes)
45
+ task_col_names_all = [str(item.name) for item in task_attributes]
46
+
47
+ # import pdb; pdb.set_trace()
48
+
49
+ # Add empty columns with specified names
50
+ for col_name in task_col_names_all:
51
+ if col_name not in df.columns:
52
+ df[col_name] = None
53
+
54
+ return raw_data, df
55
+
56
+
57
+ def get_evaluation_queue_df(save_path: str, cols: list) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
58
+ entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
59
+ all_evals = []
60
+
61
+ for entry in entries:
62
+ if ".json" in entry:
63
+ file_path = os.path.join(save_path, entry)
64
+ with open(file_path) as fp:
65
+ data = json.load(fp)
66
+
67
+ data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
68
+ data[EvalQueueColumn.revision.name] = data.get("revision", "main")
69
+
70
+ all_evals.append(data)
71
+ elif ".md" not in entry:
72
+ # this is a folder
73
+ sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if not e.startswith(".")]
74
+ for sub_entry in sub_entries:
75
+ file_path = os.path.join(save_path, entry, sub_entry)
76
+ with open(file_path) as fp:
77
+ data = json.load(fp)
78
+
79
+ data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
80
+ data[EvalQueueColumn.revision.name] = data.get("revision", "main")
81
+ all_evals.append(data)
82
+
83
+ pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
84
+ running_list = [e for e in all_evals if e["status"] == "RUNNING"]
85
+ finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
86
+ df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
87
+ df_running = pd.DataFrame.from_records(running_list, columns=cols)
88
+ df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
89
+ return df_finished[cols], df_running[cols], df_pending[cols]
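
A rough usage sketch of the two population helpers, assuming the eval-results and eval-queue datasets have already been snapshot-downloaded to the local cache paths.

```python
# Sketch: building the leaderboard and queue DataFrames from the local snapshots.
from src.display.utils import BENCHMARK_COLS, COLS, EVAL_COLS
from src.envs import EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH
from src.populate import get_evaluation_queue_df, get_leaderboard_df

raw_data, leaderboard_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
finished_df, running_df, pending_df = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
```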
src/submission/check_validity.py ADDED
@@ -0,0 +1,194 @@
1
+ import json
2
+ import os
3
+ import re
4
+ from collections import defaultdict
5
+ from datetime import datetime, timedelta, timezone
6
+
7
+ import huggingface_hub
8
+ from huggingface_hub import ModelCard
9
+ from huggingface_hub.hf_api import ModelInfo
10
+ # from transformers import AutoConfig
11
+ from transformers import AutoConfig, AutoTokenizer
12
+ from transformers.models.auto.tokenization_auto import tokenizer_class_from_name, get_tokenizer_config
13
+
14
+ from src.envs import HAS_HIGHER_RATE_LIMIT
15
+
16
+
17
+ # ht to @Wauplin, thank you for the snippet!
18
+ # See https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/317
19
+ def check_model_card(repo_id: str) -> tuple[bool, str]:
20
+ # Returns operation status, and error message
21
+ try:
22
+ card = ModelCard.load(repo_id)
23
+ except huggingface_hub.utils.EntryNotFoundError:
24
+ return False, "Please add a model card to your model to explain how you trained/fine-tuned it."
25
+
26
+ # Enforce license metadata
27
+ if card.data.license is None:
28
+ if not ("license_name" in card.data and "license_link" in card.data):
29
+ return False, (
30
+ "License not found. Please add a license to your model card using the `license` metadata or a"
31
+ " `license_name`/`license_link` pair."
32
+ )
33
+
34
+ # Enforce card content
35
+ if len(card.text) < 200:
36
+ return False, "Please add a description to your model card, it is too short."
37
+
38
+ return True, ""
39
+
40
+
41
+ # def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
42
+ # try:
43
+ # config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
44
+ # if test_tokenizer:
45
+ # tokenizer_config = get_tokenizer_config(model_name)
46
+
47
+ # if tokenizer_config is not None:
48
+ # tokenizer_class_candidate = tokenizer_config.get("tokenizer_class", None)
49
+ # else:
50
+ # tokenizer_class_candidate = config.tokenizer_class
51
+
52
+ # tokenizer_class = None
53
+ # if tokenizer_class_candidate is not None:
54
+ # tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
55
+
56
+ # if tokenizer_class is None:
57
+ # return (
58
+ # False,
59
+ # f"uses {tokenizer_class_candidate}, which is not in a transformers release, therefore not supported at the moment.", # pythia-160m throws this error. seems unnecessary.
60
+ # None
61
+ # )
62
+ # return True, None, config
63
+
64
+ # except ValueError:
65
+ # return (
66
+ # False,
67
+ # "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
68
+ # None
69
+ # )
70
+
71
+ # except Exception as e:
72
+ # print('XXX', e)
73
+ # return False, "was not found on hub!", None
74
+
75
+ # replaced with https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/blob/main/src/submission/check_validity.py
76
+ def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str, AutoConfig]:
77
+ try:
78
+ config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token) #, force_download=True)
79
+ if test_tokenizer:
80
+ try:
81
+ tk = AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
82
+ except ValueError as e:
83
+ return (
84
+ False,
85
+ f"uses a tokenizer which is not in a transformers release: {e}",
86
+ None
87
+ )
88
+ except Exception as e:
89
+ return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
90
+ return True, None, config
91
+
92
+ except ValueError as e:
93
+ return (
94
+ False,
95
+ "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
96
+ None
97
+ )
98
+
99
+ except Exception as e:
100
+ return False, "was not found on hub!", None
101
+
102
+ def get_model_size(model_info: ModelInfo, precision: str):
103
+ size_pattern = re.compile(r"(\d\.)?\d+(b|m)")
104
+ try:
105
+ model_size = round(model_info.safetensors["total"] / 1e9, 3)
106
+ except (AttributeError, TypeError ):
107
+ try:
108
+ size_match = re.search(size_pattern, model_info.modelId.lower())
109
+ model_size = size_match.group(0)
110
+ model_size = round(float(model_size[:-1]) if model_size[-1] == "b" else float(model_size[:-1]) / 1e3, 3)
111
+ except AttributeError:
112
+ return 0 # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
113
+
114
+ size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
115
+ model_size = size_factor * model_size
116
+ return model_size
117
+
118
+ def get_model_arch(model_info: ModelInfo):
119
+ return model_info.config.get("architectures", "Unknown")
120
+
121
+ def user_submission_permission(org_or_user, users_to_submission_dates, rate_limit_period, rate_limit_quota):
122
+ if org_or_user not in users_to_submission_dates:
123
+ return True, ""
124
+ submission_dates = sorted(users_to_submission_dates[org_or_user])
125
+
126
+ time_limit = (datetime.now(timezone.utc) - timedelta(days=rate_limit_period)).strftime("%Y-%m-%dT%H:%M:%SZ")
127
+ submissions_after_timelimit = [d for d in submission_dates if d > time_limit]
128
+
129
+ num_models_submitted_in_period = len(submissions_after_timelimit)
130
+ if org_or_user in HAS_HIGHER_RATE_LIMIT:
131
+ rate_limit_quota = 2 * rate_limit_quota
132
+
133
+ if num_models_submitted_in_period > rate_limit_quota:
134
+ error_msg = f"Organisation or user `{org_or_user}`"
135
+ error_msg += f"already has {num_models_submitted_in_period} model requests submitted to the leaderboard "
136
+ error_msg += f"in the last {rate_limit_period} days.\n"
137
+ error_msg += (
138
+ "Please wait a couple of days before resubmitting, so that everybody can enjoy using the leaderboard 🤗"
139
+ )
140
+ return False, error_msg
141
+ return True, ""
142
+
143
+
144
+ # # already_submitted_models(EVAL_REQUESTS_PATH) os.path.join(CACHE_PATH, "eval-queue")
145
+ # # REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
146
+ # # debug: current code doesn't allow submission of the same model for a different task.
147
+ # def already_submitted_models(requested_models_dir: str) -> set[str]:
148
+ # depth = 1
149
+ # file_names = []
150
+ # users_to_submission_dates = defaultdict(list)
151
+
152
+ # for root, _, files in os.walk(requested_models_dir):
153
+ # current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
154
+ # if current_depth == depth:
155
+ # for file in files:
156
+ # if not file.endswith(".json"):
157
+ # continue
158
+ # with open(os.path.join(root, file), "r") as f:
159
+ # info = json.load(f)
160
+ # file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
161
+
162
+ # # Select organisation
163
+ # if info["model"].count("/") == 0 or "submitted_time" not in info:
164
+ # continue
165
+ # organisation, _ = info["model"].split("/")
166
+ # users_to_submission_dates[organisation].append(info["submitted_time"]) # why is this useful?
167
+
168
+ # return set(file_names), users_to_submission_dates
169
+
170
+ def already_submitted_models(requested_models_dir: str) -> set[str]:
171
+ depth = 1
172
+ file_names = [] # more like identifiers
173
+ users_to_submission_dates = defaultdict(list)
174
+
175
+ for root, _, files in os.walk(requested_models_dir):
176
+ current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
177
+ if current_depth == depth:
178
+ for file in files:
179
+ if not file.endswith(".json"):
180
+ continue
181
+ with open(os.path.join(root, file), "r") as f:
182
+ info = json.load(f)
183
+ requested_tasks = [task_dic['benchmark'] for task_dic in info["requested_tasks"]]
184
+ for requested_task in requested_tasks:
185
+
186
+ file_names.append(f"{info['model']}_{requested_task}_{info['revision']}_{info['precision']}")
187
+
188
+ # Select organisation
189
+ if info["model"].count("/") == 0 or "submitted_time" not in info:
190
+ continue
191
+ organisation, _ = info["model"].split("/")
192
+ users_to_submission_dates[organisation].append(info["submitted_time"]) # why is this useful?
193
+
194
+ return set(file_names), users_to_submission_dates
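
For orientation, a hedged sketch of the validity checks a submission goes through before a request file is written; the repo id below is a placeholder.

```python
# Sketch: the pre-submission checks provided by this module.
from src.envs import API
from src.submission.check_validity import check_model_card, get_model_size, is_model_on_hub

model = "some-org/some-model"
on_hub, error, config = is_model_on_hub(model, revision="main", test_tokenizer=True)
card_ok, card_error = check_model_card(model)
size_b = get_model_size(API.model_info(repo_id=model), precision="float16")  # billions of parameters
```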
src/submission/submit.py ADDED
@@ -0,0 +1,191 @@
1
+ import json
2
+ import os
3
+ from datetime import datetime, timezone
4
+
5
+ from src.display.formatting import styled_error, styled_message, styled_warning
6
+ from src.envs import API, EVAL_REQUESTS_PATH, H4_TOKEN, QUEUE_REPO, RATE_LIMIT_PERIOD, RATE_LIMIT_QUOTA
7
+ from src.leaderboard.filter_models import DO_NOT_SUBMIT_MODELS
8
+ from src.submission.check_validity import (
9
+ already_submitted_models,
10
+ check_model_card,
11
+ get_model_size,
12
+ is_model_on_hub,
13
+ user_submission_permission,
14
+ )
15
+
16
+ ## it just uploads request file. where does the evaluation actually happen?
17
+
18
+ REQUESTED_MODELS = None
19
+ USERS_TO_SUBMISSION_DATES = None
20
+
21
+
22
+ def add_new_eval(
23
+ model: str,
24
+
25
+ requested_tasks: list, # write better type hints. this is list of class Task.
26
+
27
+
28
+ base_model: str,
29
+ revision: str,
30
+ precision: str,
31
+ private: bool,
32
+ weight_type: str,
33
+ model_type: str,
34
+ ):
35
+ global REQUESTED_MODELS
36
+ global USERS_TO_SUBMISSION_DATES
37
+ if not REQUESTED_MODELS:
38
+ REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
39
+ # REQUESTED_MODELS is set(file_names), where file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
40
+
41
+ user_name = ""
42
+ model_path = model
43
+ if "/" in model:
44
+ user_name = model.split("/")[0]
45
+ model_path = model.split("/")[1]
46
+
47
+ precision = precision.split(" ")[0]
48
+ current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
49
+
50
+ if model_type is None or model_type == "":
51
+ return styled_error("Please select a model type.")
52
+
53
+ # Is the user rate limited?
54
+ if user_name != "":
55
+ user_can_submit, error_msg = user_submission_permission(
56
+ user_name, USERS_TO_SUBMISSION_DATES, RATE_LIMIT_PERIOD, RATE_LIMIT_QUOTA
57
+ )
58
+ if not user_can_submit:
59
+ return styled_error(error_msg)
60
+
61
+ # Did the model authors forbid its submission to the leaderboard?
62
+ if model in DO_NOT_SUBMIT_MODELS or base_model in DO_NOT_SUBMIT_MODELS:
63
+ return styled_warning("Model authors have requested that their model be not submitted on the leaderboard.")
64
+
65
+ # Does the model actually exist?
66
+ if revision == "":
67
+ revision = "main"
68
+
69
+ # Is the model on the hub?
70
+ if weight_type in ["Delta", "Adapter"]:
71
+ base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=H4_TOKEN, test_tokenizer=True)
72
+ if not base_model_on_hub:
73
+ return styled_error(f'Base model "{base_model}" {error}')
74
+
75
+ if not weight_type == "Adapter":
76
+ model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, test_tokenizer=True)
77
+ if not model_on_hub:
78
+ return styled_error(f'Model "{model}" {error}')
79
+
80
+ # Is the model info correctly filled?
81
+ try:
82
+ model_info = API.model_info(repo_id=model, revision=revision)
83
+ except Exception:
84
+ return styled_error("Could not get your model information. Please fill it up properly.")
85
+
86
+ model_size = get_model_size(model_info=model_info, precision=precision)
87
+
88
+ # Were the model card and license filled?
89
+ try:
90
+ license = model_info.cardData["license"]
91
+ except Exception:
92
+ return styled_error("Please select a license for your model")
93
+
94
+ modelcard_OK, error_msg = check_model_card(model)
95
+ if not modelcard_OK:
96
+ return styled_error(error_msg)
97
+
98
+ # Seems good, creating the eval
99
+ print("Adding new eval")
100
+
101
+ print()
102
+ print(f"requested_tasks: {requested_tasks}")
103
+ print(f"type(requested_tasks): {type(requested_tasks)}")
104
+ print()
105
+ # requested_tasks: [{'benchmark': 'hellaswag', 'metric': 'acc_norm', 'col_name': 'HellaSwag'}, {'benchmark': 'pubmedqa', 'metric': 'acc', 'col_name': 'PubMedQA'}]
106
+ # type(requested_tasks): <class 'list'>
107
+
108
+ requested_task_names = [task_dic['benchmark'] for task_dic in requested_tasks]
109
+
110
+ print()
111
+ print(f"requested_task_names: {requested_task_names}")
112
+ print(f"type(requested_task_names): {type(requested_task_names)}")
113
+ print()
114
+
115
+ already_submitted_tasks = []
116
+
117
+ for requested_task_name in requested_task_names:
118
+
119
+ if f"{model}_{requested_task_name}_{revision}_{precision}" in REQUESTED_MODELS:
120
+ # return styled_warning("This model has been already submitted.")
121
+ already_submitted_tasks.append(requested_task_name)
122
+
123
+ task_names_for_eval = set(requested_task_names) - set(already_submitted_tasks)
124
+ task_names_for_eval = list(task_names_for_eval)
125
+
126
+ return_msg = "Your request has been submitted to the evaluation queue! Please wait for up to an hour for the model to show in the PENDING list."
127
+ if len(already_submitted_tasks) > 0:
128
+
129
+ return_msg = f"This model has been already submitted for task(s) {already_submitted_tasks}. Evaluation will proceed for tasks {task_names_for_eval}. Please wait for up to an hour for the model to show in the PENDING list."
130
+
131
+ if len(task_names_for_eval)==0:
132
+ return styled_warning(f"This model has been already submitted for task(s) {already_submitted_tasks}.")
133
+
134
+ tasks_for_eval = [dct for dct in requested_tasks if dct['benchmark'] in task_names_for_eval]
135
+
136
+ print()
137
+ print(f"tasks_for_eval: {tasks_for_eval}")
138
+ # print(f"type(requested_task_names): {type(requested_task_names)}")
139
+ print()
140
+
141
+ eval_entry = {
142
+ "model": model,
143
+
144
+ "requested_tasks": tasks_for_eval, # this is a list of tasks. would eval file be written correctly for each tasks? YES. run_evaluation() takes list of tasks. might have to specify
145
+
146
+ "base_model": base_model,
147
+ "revision": revision,
148
+ "private": private,
149
+ "precision": precision,
150
+ "weight_type": weight_type,
151
+ "status": "PENDING",
152
+ "submitted_time": current_time,
153
+ "model_type": model_type,
154
+ "likes": model_info.likes,
155
+ "params": model_size,
156
+ "license": license,
157
+ }
158
+
159
+
160
+ ####---- ####---- ####---- ####---- ####---- ####---- ####---- ####---- ####---- ####---- ####---- ####---- ####---- ####----
161
+
162
+
163
+
164
+ print("Creating eval file")
165
+ OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}" # local path
166
+ os.makedirs(OUT_DIR, exist_ok=True)
167
+ out_path = f"{OUT_DIR}/{model_path}_{'_'.join([f'{task}' for task in task_names_for_eval])}_eval_request_{private}_{precision}_{weight_type}.json"
168
+
169
+ print(f"out_path = {out_path}")
170
+
171
+ with open(out_path, "w") as f:
172
+ f.write(json.dumps(eval_entry)) # local path used! for saving request file.
173
+
174
+ print("Uploading eval file (QUEUE_REPO)")
175
+ print()
176
+ print(f"path_or_fileobj={out_path}, path_in_repo={out_path.split('eval-queue/')[1]}, repo_id={QUEUE_REPO}, repo_type=dataset,")
177
+ API.upload_file(
178
+ path_or_fileobj=out_path,
179
+ path_in_repo=out_path.split("eval-queue/")[1],
180
+ repo_id=QUEUE_REPO,
181
+ repo_type="dataset",
182
+ commit_message=f"Add {model} to eval queue",
183
+ )
184
+
185
+ print(f"is os.remove(out_path) the problem?")
186
+ # Remove the local file
187
+ os.remove(out_path)
188
+
189
+ return styled_message(
190
+ return_msg
191
+ )
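
For reference, this is roughly the shape of the request file that `add_new_eval` writes and uploads to `QUEUE_REPO`, presumably consumed later by the backend (`backend-cli.py`) for entries with status `PENDING`; all field values below are placeholders.

```python
# Illustrative request entry, mirroring the eval_entry dict above (placeholder values).
example_request = {
    "model": "some-org/some-model",
    "requested_tasks": [{"benchmark": "pubmedqa", "metric": "acc", "col_name": "PubMedQA"}],
    "base_model": "",
    "revision": "main",
    "private": False,
    "precision": "float16",
    "weight_type": "Original",
    "status": "PENDING",
    "submitted_time": "2024-01-01T00:00:00Z",
    "model_type": "pretrained",
    "likes": 0,
    "params": 7.0,
    "license": "apache-2.0",
}
```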
src/utils.py ADDED
@@ -0,0 +1,29 @@
1
+ import pandas as pd
2
+ from huggingface_hub import snapshot_download
3
+
4
+
5
+ def my_snapshot_download(repo_id, revision, local_dir, repo_type, max_workers):
6
+ for i in range(10):
7
+ try:
8
+ snapshot_download(repo_id=repo_id, revision=revision, local_dir=local_dir, repo_type=repo_type, max_workers=max_workers)
9
+ return
10
+ except Exception:
11
+ import time
12
+ time.sleep(60)
13
+ return
14
+
15
+
16
+ def get_dataset_url(row):
17
+ dataset_name = row['Benchmark']
18
+ dataset_url = row['Dataset Link']
19
+ benchmark = f'<a target="_blank" href="{dataset_url}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{dataset_name}</a>'
20
+ return benchmark
21
+
22
+ def get_dataset_summary_table(file_path):
23
+ df = pd.read_csv(file_path)
24
+
25
+ df['Benchmark'] = df.apply(lambda x: get_dataset_url(x), axis=1)
26
+
27
+ df = df[['Category', 'Benchmark', 'Data Split', 'Data Size', 'Language']]
28
+
29
+ return df
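
Finally, a hedged sketch of the dataset-summary CSV that `get_dataset_summary_table` expects; the file name is illustrative.

```python
# Sketch: the CSV must provide at least the columns selected above, plus "Dataset Link".
from src.utils import get_dataset_summary_table

df = get_dataset_summary_table("dataset_summary.csv")
print(df.columns.tolist())  # ['Category', 'Benchmark', 'Data Split', 'Data Size', 'Language']
```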