Update src/submission/submit.py
src/submission/submit.py (+4 -4)
@@ -99,9 +99,9 @@ def add_new_eval(
 
     # Seems good, creating the eval
     print("Adding new eval")
-    dfs = get_evaluation_queue_df(EVAL_REQUESTS_PATH, cols=["job_id"])
-    dfs = pd.concat(dfs).reset_index(drop=True)
-    max_job_id = max([int(c) for c in dfs["job_id"].values])
+    # dfs = get_evaluation_queue_df(EVAL_REQUESTS_PATH, cols=["job_id"])
+    # dfs = pd.concat(dfs).reset_index(drop=True)
+    # max_job_id = max([int(c) for c in dfs["job_id"].values])
 
     eval_entry = {
         "model": model,
@@ -116,7 +116,7 @@ def add_new_eval(
         "likes": model_info.likes,
         "params": model_size,
         "license": license,
-        "job_id": max_job_id+1
+        # "job_id": max_job_id+1
     }
 
     # Check for duplicate submission
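For context, the commented-out lines derived the next job_id by scanning the existing evaluation queue and taking the maximum id plus one. A minimal sketch of that disabled logic, assuming get_evaluation_queue_df returns a list of DataFrames that each carry a "job_id" column; the stub and the EVAL_REQUESTS_PATH value below are hypothetical stand-ins, not the Space's real implementation:

import pandas as pd

EVAL_REQUESTS_PATH = "eval-queue"  # assumption: illustrative value, not the Space's actual path

def get_evaluation_queue_df(path, cols):
    # Hypothetical stand-in for the Space's queue reader: the real helper
    # loads pending request files from `path`; here we fabricate two frames.
    return [pd.DataFrame({"job_id": ["1", "2"]}), pd.DataFrame({"job_id": ["3"]})]

# The logic this commit disables, as it appears in the removed lines:
dfs = get_evaluation_queue_df(EVAL_REQUESTS_PATH, cols=["job_id"])
dfs = pd.concat(dfs).reset_index(drop=True)
max_job_id = max([int(c) for c in dfs["job_id"].values])

print(max_job_id + 1)  # the id the old code would have assigned -> 4

With these lines commented out, eval_entry is written without a "job_id" key, so nothing downstream of this function should depend on that field being present.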