albertvillanova HF staff committed on
Commit e970061
1 Parent(s): 41fbe9f

Add explanation that login is required for GPQA Details

Files changed (2)
  1. src/constants.py +1 -1
  2. src/details.py +1 -1
src/constants.py CHANGED
@@ -64,7 +64,7 @@ SUBTASKS = {
 
 TASK_DESCRIPTIONS = {
     "leaderboard_bbh": "BBH is a subset of 23 challenging tasks from the BigBench dataset to evaluate language models. The tasks use objective metrics, are highly difficult, and have sufficient sample sizes for statistical significance. They include multistep arithmetic, algorithmic reasoning (e.g., boolean expressions, SVG shapes), language understanding (e.g., sarcasm detection, name disambiguation), and world knowledge. BBH performance correlates well with human preferences, providing valuable insights into model capabilities.",
-    "leaderboard_gpqa": "GPQA is a highly challenging knowledge dataset with questions crafted by PhD-level domain experts in fields like biology, physics, and chemistry. These questions are designed to be difficult for laypersons but relatively easy for experts. The dataset has undergone multiple rounds of validation to ensure both difficulty and factual accuracy. Access to GPQA is restricted through gating mechanisms to minimize the risk of data contamination. Consequently, we do not provide plain text examples from this dataset, as requested by the authors.",
+    "leaderboard_gpqa": "GPQA is a highly challenging knowledge dataset with questions crafted by PhD-level domain experts in fields like biology, physics, and chemistry. These questions are designed to be difficult for laypersons but relatively easy for experts. The dataset has undergone multiple rounds of validation to ensure both difficulty and factual accuracy.\n\nWe require login to see the details because access to GPQA is restricted to minimize the risk of data contamination.",
     "leaderboard_ifeval": "IFEval is a dataset designed to test a model’s ability to follow explicit instructions, such as “include keyword x” or “use format y.” The focus is on the model’s adherence to formatting instructions rather than the content generated, allowing for the use of strict and rigorous metrics.",
     # "leaderboard_math_hard": "MATH is a compilation of high-school level competition problems gathered from several sources, formatted consistently using Latex for equations and Asymptote for figures. Generations must fit a very specific output format. We keep only level 5 MATH questions and call it MATH Lvl 5.",
     "leaderboard_math": "MATH is a compilation of high-school level competition problems gathered from several sources, formatted consistently using Latex for equations and Asymptote for figures. Generations must fit a very specific output format. We keep only level 5 MATH questions and call it MATH Lvl 5.",
src/details.py CHANGED
@@ -15,7 +15,7 @@ def update_task_description_component(task):
     return gr.Textbox(
         description,
         label="Task Description",
-        lines=5,
+        lines=6,
         visible=True,
     )
 
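
For context, the surrounding code in src/details.py is roughly the sketch below. Only the function signature and the return gr.Textbox(...) call are visible in the hunk above; the TASK_DESCRIPTIONS lookup and the Gradio Blocks wiring are illustrative assumptions. Bumping lines from 5 to 6 leaves room for the extra login paragraph added to the GPQA description.

import gradio as gr

from src.constants import TASK_DESCRIPTIONS  # import path assumed

def update_task_description_component(task):
    # The lookup below is an assumption; only the signature and the return
    # statement appear in the diff hunk.
    description = TASK_DESCRIPTIONS.get(task, "")
    return gr.Textbox(
        description,
        label="Task Description",
        lines=6,  # one extra line for the two-paragraph GPQA description
        visible=True,
    )

# Hypothetical wiring: refresh the description box whenever a task is selected.
with gr.Blocks() as demo:
    task_dropdown = gr.Dropdown(choices=sorted(TASK_DESCRIPTIONS), label="Task")
    task_description = gr.Textbox(label="Task Description", visible=False)
    task_dropdown.change(
        update_task_description_component,
        inputs=task_dropdown,
        outputs=task_description,
    )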