eduagarcia committed
Commit • 2519924
Parent(s): 6c50b96

Revert "Retry 1 FAILED models"

This reverts commit 6c50b96af97408dc80dd090ac45740cadb172f27.
- ConvexAI/Luminex-34B-v0.1_eval_request_False_float16_Original.json +17 -3
- Qwen/Qwen2-72B-Instruct_eval_request_False_bfloat16_Original.json +17 -3
- Qwen/Qwen2-72B_eval_request_False_bfloat16_Original.json +3 -3
- ibivibiv/multimaster-7b-v6_eval_request_False_bfloat16_Original.json +17 -3
- wenbopan/Faro-Yi-9B-DPO_eval_request_False_float16_Original.json +4 -2
ConvexAI/Luminex-34B-v0.1_eval_request_False_float16_Original.json CHANGED
@@ -8,10 +8,24 @@
     "architectures": "LlamaForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "
+    "status": "FINISHED",
     "submitted_time": "2024-06-08T22:12:40Z",
     "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
     "source": "leaderboard",
-    "job_id":
-    "job_start_time":
+    "job_id": 814,
+    "job_start_time": "2024-06-14T16-37-48.304200",
+    "eval_version": "1.1.0",
+    "result_metrics": {
+        "enem_challenge": 0.7200839748075577,
+        "bluex": 0.6481223922114048,
+        "oab_exams": 0.544874715261959,
+        "assin2_rte": 0.9191070641797621,
+        "assin2_sts": 0.8130683879495547,
+        "faquad_nli": 0.8226956044555551,
+        "hatebr_offensive": 0.6983754481802518,
+        "portuguese_hate_speech": 0.7080758240759798,
+        "tweetsentbr": 0.6743942014992422
+    },
+    "result_metrics_average": 0.7276441791801409,
+    "result_metrics_npm": 0.5851660324853423
 }
Qwen/Qwen2-72B-Instruct_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,10 +8,24 @@
     "architectures": "Qwen2ForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "
+    "status": "FINISHED",
     "submitted_time": "2024-06-08T03:08:06Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
-    "job_id":
-    "job_start_time":
+    "job_id": 820,
+    "job_start_time": "2024-06-14T13-11-55.047025",
+    "eval_version": "1.1.0",
+    "result_metrics": {
+        "enem_challenge": 0.8257522743177047,
+        "bluex": 0.7538247566063978,
+        "oab_exams": 0.6564920273348519,
+        "assin2_rte": 0.9493366322025627,
+        "assin2_sts": 0.7472406013787171,
+        "faquad_nli": 0.8439608636977058,
+        "hatebr_offensive": 0.8393795461324889,
+        "portuguese_hate_speech": 0.7446794363437921,
+        "tweetsentbr": 0.7550808829097511
+    },
+    "result_metrics_average": 0.7906385578804414,
+    "result_metrics_npm": 0.6877609902729503
 }
Qwen/Qwen2-72B_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,10 +8,10 @@
     "architectures": "Qwen2ForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "
+    "status": "RUNNING",
    "submitted_time": "2024-06-08T03:07:52Z",
     "model_type": "🟢 : pretrained",
     "source": "leaderboard",
-    "job_id":
-    "job_start_time":
+    "job_id": 821,
+    "job_start_time": "2024-06-14T18-05-03.920497"
 }
ibivibiv/multimaster-7b-v6_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,10 +8,24 @@
     "architectures": "MixtralForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "
+    "status": "FINISHED",
     "submitted_time": "2024-06-08T03:17:21Z",
     "model_type": "🤝 : base merges and moerges",
     "source": "leaderboard",
-    "job_id":
-    "job_start_time":
+    "job_id": 813,
+    "job_start_time": "2024-06-14T13-39-45.160271",
+    "eval_version": "1.1.0",
+    "result_metrics": {
+        "enem_challenge": 0.6445066480055983,
+        "bluex": 0.5479833101529903,
+        "oab_exams": 0.42642369020501136,
+        "assin2_rte": 0.9219507504908876,
+        "assin2_sts": 0.775788909453694,
+        "faquad_nli": 0.7186147186147187,
+        "hatebr_offensive": 0.867516403157304,
+        "portuguese_hate_speech": 0.661509185600641,
+        "tweetsentbr": 0.4976233004346325
+    },
+    "result_metrics_average": 0.673546324012831,
+    "result_metrics_npm": 0.5164232867443359
 }
wenbopan/Faro-Yi-9B-DPO_eval_request_False_float16_Original.json CHANGED
@@ -8,10 +8,12 @@
     "architectures": "LlamaForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "
+    "status": "FAILED",
     "submitted_time": "2024-05-29T17:01:37Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 793,
-    "job_start_time": "2024-06-12T14-48-22.236790"
+    "job_start_time": "2024-06-12T14-48-22.236790",
+    "error_msg": "The NVIDIA driver on your system is too old (found version 11040). Please update your GPU driver by downloading and installing a new version from the URL: http://www.nvidia.com/Download/index.aspx Alternatively, go to: https://pytorch.org to install a PyTorch version that has been compiled with your version of the CUDA driver.",
+    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 563, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3754, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4214, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 887, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 399, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 293, in _lazy_init\n torch._C._cuda_init()\nRuntimeError: The NVIDIA driver on your system is too old (found version 11040). Please update your GPU driver by downloading and installing a new version from the URL: http://www.nvidia.com/Download/index.aspx Alternatively, go to: https://pytorch.org to install a PyTorch version that has been compiled with your version of the CUDA driver.\n"
 }