# comparator/src/constants.py
RESULTS_DATASET_ID = "datasets/open-llm-leaderboard/results"
DETAILS_DATASET_ID = "datasets/open-llm-leaderboard/{model_name_sanitized}-details"
DETAILS_FILENAME = "samples_{subtask}_*.json*"
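

# Illustrative sketch (not part of the original constants): how the templates
# above can be filled in for one model/subtask pair. The helper name and the
# doctest values are hypothetical examples, not real leaderboard entries.
def _details_location(model_name_sanitized: str, subtask: str) -> tuple[str, str]:
    """Return (details dataset repo id, filename glob) for a model's subtask.

    >>> _details_location("org__model", "leaderboard_ifeval")
    ('datasets/open-llm-leaderboard/org__model-details', 'samples_leaderboard_ifeval_*.json*')
    """
    repo_id = DETAILS_DATASET_ID.format(model_name_sanitized=model_name_sanitized)
    # The trailing ".json*" in DETAILS_FILENAME matches both .json and .jsonl details files.
    filename_glob = DETAILS_FILENAME.format(subtask=subtask)
    return repo_id, filename_glob

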
TASKS = {
# "leaderboard_arc_challenge": ("ARC", "leaderboard_arc_challenge"),
"leaderboard_bbh": ("BBH", "leaderboard_bbh"),
"leaderboard_gpqa": ("GPQA", "leaderboard_gpqa"),
"leaderboard_ifeval": ("IFEval", "leaderboard_ifeval"),
"leaderboard_math_hard": ("MATH", "leaderboard_math"),
"leaderboard_mmlu_pro": ("MMLU-Pro", "leaderboard_mmlu_pro"),
"leaderboard_musr": ("MuSR", "leaderboard_musr"),
}
SUBTASKS = {
# "leaderboard_arc_challenge": ["leaderboard_arc_challenge"],
"leaderboard_bbh": [
("Boolean Expressions", "leaderboard_bbh_boolean_expressions"),
("Causal Judgment", "leaderboard_bbh_causal_judgement"),
("Date Understanding", "leaderboard_bbh_date_understanding"),
("Disambiguation QA", "leaderboard_bbh_disambiguation_qa"),
("Formal Fallacies Syllogisms Negation", "leaderboard_bbh_formal_fallacies"),
("Geometric Shapes", "leaderboard_bbh_geometric_shapes"),
("Hyperbaton", "leaderboard_bbh_hyperbaton"),
("Logical Deduction (5)", "leaderboard_bbh_logical_deduction_five_objects"),
("Logical Deduction (7)", "leaderboard_bbh_logical_deduction_seven_objects"),
("Logical Deduction (3)", "leaderboard_bbh_logical_deduction_three_objects"),
("Movie Recommendation", "leaderboard_bbh_movie_recommendation"),
("Navigate", "leaderboard_bbh_navigate"),
("Object Counting", "leaderboard_bbh_object_counting"),
("Penguins in a Table", "leaderboard_bbh_penguins_in_a_table"),
("Reasoning about Colored Objects", "leaderboard_bbh_reasoning_about_colored_objects"),
("Ruin Names", "leaderboard_bbh_ruin_names"),
("Salient Translation Error Detection", "leaderboard_bbh_salient_translation_error_detection"),
("Sports Understanding", "leaderboard_bbh_snarks", "leaderboard_bbh_sports_understanding"),
("Temporal Sequences", "leaderboard_bbh_temporal_sequences"),
("Tracking Shuffled Objects (5)", "leaderboard_bbh_tracking_shuffled_objects_five_objects"),
("Tracking Shuffled Objects (7)", "leaderboard_bbh_tracking_shuffled_objects_seven_objects"),
("Tracking Shuffled Objects (3)", "leaderboard_bbh_tracking_shuffled_objects_three_objects"),
("Web of Lies", "leaderboard_bbh_web_of_lies"),
],
"leaderboard_gpqa": [
("Extended", "leaderboard_gpqa_extended"),
("Diamond", "leaderboard_gpqa_diamond"),
("Main", "leaderboard_gpqa_main"),
],
"leaderboard_ifeval": [("IFEval", "leaderboard_ifeval")],
# "leaderboard_math_hard": [
"leaderboard_math": [
("Algebra", "leaderboard_math_algebra_hard"),
("Counting and Probability", "leaderboard_math_counting_and_prob_hard"),
("Geometry", "leaderboard_math_geometry_hard"),
("Intermediate Algebra", "leaderboard_math_intermediate_algebra_hard"),
("Number Theory", "leaderboard_math_num_theory_hard"),
("Prealgebra", "leaderboard_math_prealgebra_hard"),
("Precalculus", "leaderboard_math_precalculus_hard"),
],
"leaderboard_mmlu_pro": [("MMLU-Pro", "leaderboard_mmlu_pro")],
"leaderboard_musr": [
("Murder Mystery", "leaderboard_musr_murder_mysteries"),
("Object Placements", "leaderboard_musr_object_placements"),
("Team Allocation", "leaderboard_musr_team_allocation"),
],
}
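

# Illustrative sketch (not part of the original constants): the second element
# of each TASKS value is the key into SUBTASKS, so a task's display name and
# its subtask display names can be looked up together. The helper name and the
# doctest are hypothetical examples.
def _display_names(task_key: str) -> tuple[str, list[str]]:
    """Return (task display name, subtask display names) for a TASKS key.

    >>> _display_names("leaderboard_gpqa")
    ('GPQA', ['Extended', 'Diamond', 'Main'])
    """
    task_name, subtasks_key = TASKS[task_key]
    subtask_names = [name for name, *_ in SUBTASKS[subtasks_key]]
    return task_name, subtask_names

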
TASK_DESCRIPTIONS = {
"leaderboard_bbh": "BBH is a subset of 23 challenging tasks from the BigBench dataset to evaluate language models. The tasks use objective metrics, are highly difficult, and have sufficient sample sizes for statistical significance. They include multistep arithmetic, algorithmic reasoning (e.g., boolean expressions, SVG shapes), language understanding (e.g., sarcasm detection, name disambiguation), and world knowledge. BBH performance correlates well with human preferences, providing valuable insights into model capabilities.",
"leaderboard_gpqa": "GPQA is a highly challenging knowledge dataset with questions crafted by PhD-level domain experts in fields like biology, physics, and chemistry. These questions are designed to be difficult for laypersons but relatively easy for experts. The dataset has undergone multiple rounds of validation to ensure both difficulty and factual accuracy.\n\nWe require login to see the details because access to GPQA is restricted to minimize the risk of data contamination.",
"leaderboard_ifeval": "IFEval is a dataset designed to test a model’s ability to follow explicit instructions, such as “include keyword x” or “use format y.” The focus is on the model’s adherence to formatting instructions rather than the content generated, allowing for the use of strict and rigorous metrics.",
# "leaderboard_math_hard": "MATH is a compilation of high-school level competition problems gathered from several sources, formatted consistently using Latex for equations and Asymptote for figures. Generations must fit a very specific output format. We keep only level 5 MATH questions and call it MATH Lvl 5.",
"leaderboard_math": "MATH is a compilation of high-school level competition problems gathered from several sources, formatted consistently using Latex for equations and Asymptote for figures. Generations must fit a very specific output format. We keep only level 5 MATH questions and call it MATH Lvl 5.",
"leaderboard_mmlu_pro": "MMLU-Pro is a refined version of the MMLU dataset, which has been a standard for multiple-choice knowledge assessment. Recent research identified issues with the original MMLU, such as noisy data (some unanswerable questions) and decreasing difficulty due to advances in model capabilities and increased data contamination. MMLU-Pro addresses these issues by presenting models with 10 choices instead of 4, requiring reasoning on more questions, and undergoing expert review to reduce noise. As a result, MMLU-Pro is of higher quality and currently more challenging than the original.",
"leaderboard_musr": "MuSR is a new dataset consisting of algorithmically generated complex problems, each around 1,000 words in length. The problems include murder mysteries, object placement questions, and team allocation optimizations. Solving these problems requires models to integrate reasoning with long-range context parsing. Few models achieve better than random performance on this dataset.",
}
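

# Illustrative consistency check (not part of the original constants): every
# TASKS entry should point at a key that exists in both SUBTASKS and
# TASK_DESCRIPTIONS. The helper name is hypothetical; it only reads the
# mappings defined above.
def _check_task_keys() -> None:
    """Raise AssertionError if the three mappings above disagree on keys."""
    for task_key, (_display_name, subtasks_key) in TASKS.items():
        assert subtasks_key in SUBTASKS, f"{task_key}: missing SUBTASKS[{subtasks_key!r}]"
        assert subtasks_key in TASK_DESCRIPTIONS, f"{task_key}: missing TASK_DESCRIPTIONS[{subtasks_key!r}]"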