mtasic85 committed
Commit fd25fe3
Parent: 7532154

benchmarks

Files changed (1)
  1. README.md +51 -0
README.md CHANGED
@@ -154,6 +154,57 @@ litgpt evaluate --tasks 'hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_chal
  litgpt evaluate --tasks 'leaderboard' --out_dir 'evaluate-leaderboard/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
  ```

+ | Tasks |Version|Filter|n-shot| Metric | |Value | |Stderr|
+ |-----------------------------------------------------------|-------|------|-----:|-----------------------|---|-----:|---|------|
+ |leaderboard | N/A| | | | | | | |
+ | - leaderboard_bbh | N/A| | | | | | | |
+ | - leaderboard_bbh_boolean_expressions | 1|none | 3|acc_norm |↑ |0.4600|± |0.0316|
+ | - leaderboard_bbh_causal_judgement | 1|none | 3|acc_norm |↑ |0.5027|± |0.0367|
+ | - leaderboard_bbh_date_understanding | 1|none | 3|acc_norm |↑ |0.1720|± |0.0239|
+ | - leaderboard_bbh_disambiguation_qa | 1|none | 3|acc_norm |↑ |0.2960|± |0.0289|
+ | - leaderboard_bbh_formal_fallacies | 1|none | 3|acc_norm |↑ |0.4880|± |0.0317|
+ | - leaderboard_bbh_geometric_shapes | 1|none | 3|acc_norm |↑ |0.0000|± | 0|
+ | - leaderboard_bbh_hyperbaton | 1|none | 3|acc_norm |↑ |0.5160|± |0.0317|
+ | - leaderboard_bbh_logical_deduction_five_objects | 1|none | 3|acc_norm |↑ |0.2000|± |0.0253|
+ | - leaderboard_bbh_logical_deduction_seven_objects | 1|none | 3|acc_norm |↑ |0.1480|± |0.0225|
+ | - leaderboard_bbh_logical_deduction_three_objects | 1|none | 3|acc_norm |↑ |0.3160|± |0.0295|
+ | - leaderboard_bbh_movie_recommendation | 1|none | 3|acc_norm |↑ |0.2360|± |0.0269|
+ | - leaderboard_bbh_navigate | 1|none | 3|acc_norm |↑ |0.4680|± |0.0316|
+ | - leaderboard_bbh_object_counting | 1|none | 3|acc_norm |↑ |0.0480|± |0.0135|
+ | - leaderboard_bbh_penguins_in_a_table | 1|none | 3|acc_norm |↑ |0.1918|± |0.0327|
+ | - leaderboard_bbh_reasoning_about_colored_objects | 1|none | 3|acc_norm |↑ |0.1440|± |0.0222|
+ | - leaderboard_bbh_ruin_names | 1|none | 3|acc_norm |↑ |0.2360|± |0.0269|
+ | - leaderboard_bbh_salient_translation_error_detection | 1|none | 3|acc_norm |↑ |0.1360|± |0.0217|
+ | - leaderboard_bbh_snarks | 1|none | 3|acc_norm |↑ |0.5225|± |0.0375|
+ | - leaderboard_bbh_sports_understanding | 1|none | 3|acc_norm |↑ |0.4560|± |0.0316|
+ | - leaderboard_bbh_temporal_sequences | 1|none | 3|acc_norm |↑ |0.2960|± |0.0289|
+ | - leaderboard_bbh_tracking_shuffled_objects_five_objects | 1|none | 3|acc_norm |↑ |0.2120|± |0.0259|
+ | - leaderboard_bbh_tracking_shuffled_objects_seven_objects| 1|none | 3|acc_norm |↑ |0.1840|± |0.0246|
+ | - leaderboard_bbh_tracking_shuffled_objects_three_objects| 1|none | 3|acc_norm |↑ |0.3160|± |0.0295|
+ | - leaderboard_bbh_web_of_lies | 1|none | 3|acc_norm |↑ |0.5200|± |0.0317|
+ | - leaderboard_gpqa | N/A| | | | | | | |
+ | - leaderboard_gpqa_diamond | 1|none | 0|acc_norm |↑ |0.2172|± |0.0294|
+ | - leaderboard_gpqa_extended | 1|none | 0|acc_norm |↑ |0.2454|± |0.0184|
+ | - leaderboard_gpqa_main | 1|none | 0|acc_norm |↑ |0.2478|± |0.0204|
+ | - leaderboard_ifeval | 3|none | 0|inst_level_loose_acc |↑ |0.1727|± | N/A|
+ | | |none | 0|inst_level_strict_acc |↑ |0.1559|± | N/A|
+ | | |none | 0|prompt_level_loose_acc |↑ |0.0832|± |0.0119|
+ | | |none | 0|prompt_level_strict_acc|↑ |0.0795|± |0.0116|
+ | - leaderboard_math_hard | N/A| | | | | | | |
+ | - leaderboard_math_algebra_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
+ | - leaderboard_math_counting_and_prob_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
+ | - leaderboard_math_geometry_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
+ | - leaderboard_math_intermediate_algebra_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
+ | - leaderboard_math_num_theory_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
+ | - leaderboard_math_prealgebra_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
+ | - leaderboard_math_precalculus_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
+ | - leaderboard_mmlu_pro | 0.1|none | 5|acc |↑ |0.1135|± |0.0029|
+ | - leaderboard_musr | N/A| | | | | | | |
+ | - leaderboard_musr_murder_mysteries | 1|none | 0|acc_norm |↑ |0.5240|± |0.0316|
+ | - leaderboard_musr_object_placements | 1|none | 0|acc_norm |↑ |0.2734|± |0.0279|
+ | - leaderboard_musr_team_allocation | 1|none | 0|acc_norm |↑ |0.3000|± |0.0290|
+
+
  ```bash
  litgpt evaluate --tasks 'bbh_zeroshot,bbh_fewshot,bbh_cot_fewshot,bbh_cot_zeroshot' --out_dir 'evaluate-bigbenchhard/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
  ```
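For reference, the two evaluation commands touched by this diff can be run back to back against the same checkpoint. The sketch below is illustrative only and assumes the pretrained checkpoint sits at out/pretrain/final/ as in the README; it uses no flags beyond those already shown above.

```bash
# 'leaderboard' is the lm-evaluation-harness task group behind the table above
# (Open LLM Leaderboard v2 tasks: BBH, GPQA, IFEval, MATH-hard, MMLU-Pro, MuSR);
# outputs are written under evaluate-leaderboard/.
litgpt evaluate --tasks 'leaderboard' --out_dir 'evaluate-leaderboard/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/

# BIG-Bench Hard variants (zero-shot, few-shot, and chain-of-thought prompting);
# outputs are written under evaluate-bigbenchhard/.
litgpt evaluate --tasks 'bbh_zeroshot,bbh_fewshot,bbh_cot_fewshot,bbh_cot_zeroshot' --out_dir 'evaluate-bigbenchhard/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
```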