Seraph19 committed
Commit 8c88cb5 • 1 Parent(s): 74b17f2

Update app.py

Files changed (1)
  1. app.py +103 -184
app.py CHANGED
@@ -3,58 +3,65 @@ from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
 import pandas as pd
 from apscheduler.schedulers.background import BackgroundScheduler
 from huggingface_hub import snapshot_download
-
 from src.about import (
-    CITATION_BUTTON_LABEL,
-    CITATION_BUTTON_TEXT,
-    EVALUATION_QUEUE_TEXT,
-    INTRODUCTION_TEXT,
-    LLM_BENCHMARKS_TEXT,
-    TITLE,
 )
 from src.display.css_html_js import custom_css
 from src.display.utils import (
-    BENCHMARK_COLS,
-    COLS,
-    EVAL_COLS,
-    EVAL_TYPES,
-    AutoEvalColumn,
-    ModelType,
-    fields,
-    WeightType,
-    Precision
 )
 from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
 from src.populate import get_evaluation_queue_df, get_leaderboard_df
 from src.submission.submit import add_new_eval

-
 def restart_space():
-    API.restart_space(repo_id=REPO_ID)

 ### Space initialisation
 try:
-    print(EVAL_REQUESTS_PATH)
-    snapshot_download(
-        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
-    )
 except Exception:
-    restart_space()
 try:
-    print(EVAL_RESULTS_PATH)
-    snapshot_download(
-        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
-    )
 except Exception:
-    restart_space()
-

 LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
-
 (
-    finished_eval_queue_df,
-    running_eval_queue_df,
-    pending_eval_queue_df,
 ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)

 def init_leaderboard(dataframe):
@@ -70,117 +77,40 @@ def init_leaderboard(dataframe):
         ),
         search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
         hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
-        filter_columns=[
-            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
-            ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
-            ColumnFilter(AutoEvalColumn.params.name, type="slider", min=0.01, max=150, label="Select the number of parameters (B)"),
-            ColumnFilter(AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True),
-        ],
-        bool_checkboxgroup_label="Hide models",
-        interactive=False,
-    )
-def display_user_data(user_id):
     user_data = data.load_data()
     if user_id in user_data:
-        return f"Points: {user_data[user_id]['points']}\nReferrals: {len(user_data[user_id]['referrals'])}"
     else:
-        return "User not found"
-import gradio as gr # No indentation before this line!
-from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
-import pandas as pd
-# ... (Rest of your code)
-revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-model_type = gr.Dropdown(
-    choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-    label="Model type",
-    multiselect=False,
-    value=None,
-    interactive=True,
-)
-with gr.Column():
-    precision = gr.Dropdown(
-        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-        label="Precision",
-        multiselect=False,
-        value="float16",
-        interactive=True,
-    )
-    weight_type = gr.Dropdown(
-        choices=[i.value.name for i in WeightType],
-        label="Weights type",
-        multiselect=False,
-        value="Original",
-        interactive=True,
-    )
-    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-    model_name_textbox = gr.Textbox(label="Model name") # Add this line
-
-    submit_button = gr.Button("Submit Eval")
-    submission_result = gr.Markdown()

-# Indent the entire `with gr.Row()` section
-with gr.Row():
-    with gr.Accordion("📙 Citation", open=False):
-        citation_button = gr.Textbox(
-            value=CITATION_BUTTON_TEXT,
-            label=CITATION_BUTTON_LABEL,
-            lines=20,
-            elem_id="citation-button",
-            show_copy_button=True,
-        )
-
-# ... (Rest of your code)
-
-submit_button.click(
-    add_new_eval,
-    [
-        model_name_textbox,
-        base_model_name_textbox,
-        revision_name_textbox,
-        precision,
-        weight_type,
-        model_type,
-    ],
-    submission_result,
-)
-
-# ... (Scheduler)
-start_button = gr.Button("Start", elem_id="start_button")
-claim_image = gr.Image(value="path/to/your/claim_image.png", interactive=True, elem_id="claim_image")
-start_button.click(fn=display_user_data, inputs=[gr.inputs.Textbox(label="Enter your Telegram User ID")], outputs=user_data_output)
-claim_image.click(fn=lambda: "You have claimed your reward!", outputs=gr.outputs.Textbox(label="Message"))
-iface = gr.Interface(
-    fn=None, # You can add functions for tasks later
-    inputs=[
-        join_channel_section,
-        invite_friends_section,
-    ],
-    outputs=[user_data_output],
-    title="Mukera",
-)
-iface.launch(share=True, inline=True)
 demo = gr.Blocks(css=custom_css)
 with demo:
     gr.HTML(TITLE)
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
-
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
             leaderboard = init_leaderboard(LEADERBOARD_DF)
-
         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
-
         with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
             with gr.Column():
                 with gr.Row():
                     gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
                 with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
                         with gr.Row():
                             finished_eval_table = gr.components.Dataframe(
                                 value=finished_eval_queue_df,
@@ -188,22 +118,14 @@ with demo:
                                 datatype=EVAL_TYPES,
                                 row_count=5,
                             )
-                    with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                        open=False,
-                    ):
                         with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
                                 headers=EVAL_COLS,
                                 datatype=EVAL_TYPES,
                                 row_count=5,
                             )
-
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
                         with gr.Row():
                             pending_eval_table = gr.components.Dataframe(
                                 value=pending_eval_queue_df,
@@ -211,40 +133,47 @@ with demo:
                                 datatype=EVAL_TYPES,
                                 row_count=5,
                             )
-            with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-            with gr.Row():
-                with gr.Column():
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    model_type = gr.Dropdown(
-                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="Model type",
-                        multiselect=False,
-                        value=None,
-                        interactive=True,
-                    )
-
-                with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
             submit_button = gr.Button("Submit Eval")
             submission_result = gr.Markdown()
             submit_button.click(
                 add_new_eval,
                 [
@@ -257,18 +186,8 @@ with demo:
                 ],
                 submission_result,
             )
-
-    with gr.Row():
-        with gr.Accordion("📙 Citation", open=False):
-            citation_button = gr.Textbox(
-                value=CITATION_BUTTON_TEXT,
-                label=CITATION_BUTTON_LABEL,
-                lines=20,
-                elem_id="citation-button",
-                show_copy_button=True,
-            )
-
-scheduler = BackgroundScheduler()
-scheduler.add_job(restart_space, "interval", seconds=1800)
-scheduler.start()
-demo.queue(default_concurrency_limit=40).launch()
 
 import pandas as pd
 from apscheduler.schedulers.background import BackgroundScheduler
 from huggingface_hub import snapshot_download
 from src.about import (
+    CITATION_BUTTON_LABEL,
+    CITATION_BUTTON_TEXT,
+    EVALUATION_QUEUE_TEXT,
+    INTRODUCTION_TEXT,
+    LLM_BENCHMARKS_TEXT,
+    TITLE,
 )
 from src.display.css_html_js import custom_css
 from src.display.utils import (
+    BENCHMARK_COLS,
+    COLS,
+    EVAL_COLS,
+    EVAL_TYPES,
+    AutoEvalColumn,
+    ModelType,
+    fields,
+    WeightType,
+    Precision,
 )
 from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
 from src.populate import get_evaluation_queue_df, get_leaderboard_df
 from src.submission.submit import add_new_eval

 def restart_space():
+    API.restart_space(repo_id=REPO_ID)

 ### Space initialisation
 try:
+    print(EVAL_REQUESTS_PATH)
+    snapshot_download(
+        repo_id=QUEUE_REPO,
+        local_dir=EVAL_REQUESTS_PATH,
+        repo_type="dataset",
+        tqdm_class=None,
+        etag_timeout=30,
+        token=TOKEN,
+    )
 except Exception:
+    restart_space()
+
 try:
+    print(EVAL_RESULTS_PATH)
+    snapshot_download(
+        repo_id=RESULTS_REPO,
+        local_dir=EVAL_RESULTS_PATH,
+        repo_type="dataset",
+        tqdm_class=None,
+        etag_timeout=30,
+        token=TOKEN,
+    )
 except Exception:
+    restart_space()

 LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
 (
+    finished_eval_queue_df,
+    running_eval_queue_df,
+    pending_eval_queue_df,
 ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)

 def init_leaderboard(dataframe):
 
         ),
         search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
         hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
+        filter_columns=[ ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
+            ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
+            ColumnFilter(
+                AutoEvalColumn.params.name, type="slider", min=0.01, max=150, label="Select the number of parameters (B)"
+            ),
+            ColumnFilter(AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True),
+        ],
+        bool_checkboxgroup_label="Hide models",
+        interactive=False,
+    )
+
+def display_user_data(user_id):
     user_data = data.load_data()
     if user_id in user_data:
+        return f"Points: {user_data[user_id]['points']}\nReferrals: {len(user_data[user_id]['referrals'])}"
     else:
+        return "User not found"

 demo = gr.Blocks(css=custom_css)
+
 with demo:
     gr.HTML(TITLE)
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
             leaderboard = init_leaderboard(LEADERBOARD_DF)
         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
         with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
             with gr.Column():
                 with gr.Row():
                     gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
                 with gr.Column():
+                    with gr.Accordion(f"✅ Finished Evaluations ({len(finished_eval_queue_df)})", open=False):
                         with gr.Row():
                             finished_eval_table = gr.components.Dataframe(
                                 value=finished_eval_queue_df,
                                 datatype=EVAL_TYPES,
                                 row_count=5,
                             )
+                    with gr.Accordion(f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})", open=False):
                         with gr.Row():
+                            running_eval_table = gr.components.Dataframe( value=running_eval_queue_df,
                                 headers=EVAL_COLS,
                                 datatype=EVAL_TYPES,
                                 row_count=5,
                             )
+                    with gr.Accordion(f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})", open=False):
                         with gr.Row():
                             pending_eval_table = gr.components.Dataframe(
                                 value=pending_eval_queue_df,
                                 datatype=EVAL_TYPES,
                                 row_count=5,
                             )
+            with gr.Row():
+                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+            with gr.Row():
+                with gr.Column():
+                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+                    model_type = gr.Dropdown(
+                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
+                        label="Model type",
+                        multiselect=False,
+                        value=None,
+                        interactive=True,
+                    )
+                with gr.Column():
+                    precision = gr.Dropdown(
+                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
+                        label="Precision",
+                        multiselect=False,
+                        value="float16",
+                        interactive=True,
+                    )
+                    weight_type = gr.Dropdown(
+                        choices=[i.value.name for i in WeightType],
+                        label="Weights type",
+                        multiselect=False,
+                        value="Original",
+                        interactive=True,
+                    )
+                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+                    model_name_textbox = gr.Textbox(label="Model name") # Add this line
             submit_button = gr.Button("Submit Eval")
             submission_result = gr.Markdown()
+            with gr.Row():
+                with gr.Accordion("📙 Citation", open=False):
+                    citation_button = gr.Textbox(
+                        value=CITATION_BUTTON_TEXT,
+                        label=CITATION_BUTTON_LABEL,
+                        lines=20,
+                        elem_id="citation-button",
+                        show_copy_button=True,
+                    )
             submit_button.click(
                 add_new_eval,
                 [
                 ],
                 submission_result,
             )
+start_button = gr.Button("Start", elem_id="start_button")
+scheduler = BackgroundScheduler()
+scheduler.add_job(restart_space, "interval", seconds=1800)
+scheduler.start()
+demo.queue(default_concurrency_limit=40).launch()