Commit bdf1661 by Muennighoff (1 parent: fc95ebd)
data/python/data/humanevalbugs.jsonl CHANGED
The diff for this file is too large to render.
 
humaneval-x-bugs.py CHANGED
@@ -57,42 +57,42 @@ class HumanEvalXBugs(datasets.GeneratorBasedBuilder):
             name="python",
             description="Python HumanEvalBugs",
             features=[
-                "task_id", "prompt", "import", "declaration", "buggy_solution", "canonical_solution", "test", "test_setup", "example_test", "bug_type", "failure_symptoms", "entry_point"
+                "task_id", "prompt", "import", "declaration", "buggy_solution", "canonical_solution", "test", "test_setup", "example_test", "bug_type", "failure_symptoms", "entry_point", "signature", "docstring", "instruction"
             ]
         ),
         HumanEvalXBugsConfig(
             name="cpp",
             description="C++ HumanEvalBugs",
             features=[
-                "task_id", "prompt", "import", "declaration", "buggy_solution", "canonical_solution", "test", "test_setup", "example_test", "bug_type", "failure_symptoms", "entry_point"
+                "task_id", "prompt", "import", "declaration", "buggy_solution", "canonical_solution", "test", "test_setup", "example_test", "bug_type", "failure_symptoms", "entry_point", "signature", "docstring", "instruction"
             ]
         ),
         HumanEvalXBugsConfig(
             name="go",
             description="Go HumanEvalBugs",
             features=[
-                "task_id", "prompt", "import", "declaration", "buggy_solution", "canonical_solution", "test", "test_setup", "example_test", "bug_type", "failure_symptoms", "entry_point"
+                "task_id", "prompt", "import", "declaration", "buggy_solution", "canonical_solution", "test", "test_setup", "example_test", "bug_type", "failure_symptoms", "entry_point", "signature", "docstring", "instruction"
             ]
         ),
         HumanEvalXBugsConfig(
             name="java",
             description="Java HumanEvalBugs",
             features=[
-                "task_id", "prompt", "import", "declaration", "buggy_solution", "canonical_solution", "test", "test_setup", "example_test", "bug_type", "failure_symptoms", "entry_point"
+                "task_id", "prompt", "import", "declaration", "buggy_solution", "canonical_solution", "test", "test_setup", "example_test", "bug_type", "failure_symptoms", "entry_point", "signature", "docstring", "instruction"
             ]
         ),
         HumanEvalXBugsConfig(
             name="js",
             description="JavaScript HumanEvalBugs",
             features=[
-                "task_id", "prompt", "import", "declaration", "buggy_solution", "canonical_solution", "test", "test_setup", "example_test", "bug_type", "failure_symptoms", "entry_point"
+                "task_id", "prompt", "import", "declaration", "buggy_solution", "canonical_solution", "test", "test_setup", "example_test", "bug_type", "failure_symptoms", "entry_point", "signature", "docstring", "instruction"
            ]
         ),
         HumanEvalXBugsConfig(
             name="rust",
             description="JavaScript HumanEvalBugs",
             features=[
-                "task_id", "prompt", "import", "declaration", "buggy_solution", "canonical_solution", "test", "test_setup", "example_test", "bug_type", "failure_symptoms", "entry_point"
+                "task_id", "prompt", "import", "declaration", "buggy_solution", "canonical_solution", "test", "test_setup", "example_test", "bug_type", "failure_symptoms", "entry_point", "signature", "docstring", "instruction"
             ]
         ),
     ]
@@ -115,6 +115,9 @@ class HumanEvalXBugs(datasets.GeneratorBasedBuilder):
                 "test": datasets.Value("string"),
                 "test_setup": datasets.Value("string"),
                 "example_test": datasets.Value("string"),
+                "signature": datasets.Value("string"),
+                "docstring": datasets.Value("string"),
+                "instruction": datasets.Value("string"),
             }
         ),
         homepage=_HOMEPAGE,
@@ -158,5 +161,8 @@ class HumanEvalXBugs(datasets.GeneratorBasedBuilder):
             "test": row["test"],
             "test_setup": row.get("test_setup", ""),  # Only for Go
             "example_test": row["example_test"],
+            "signature": row["signature"],
+            "docstring": row["docstring"],
+            "instruction": row["instruction"],
         }
         key += 1
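
A minimal sketch of how the three new string fields could be read once this commit lands. The repo id ("Muennighoff/humaneval-x-bugs") and split name ("test") are assumptions for illustration, not taken from the diff; the config name "python" and the field names come from the schema above.

from datasets import load_dataset

# Assumed repo id and split; substitute the actual values if they differ.
ds = load_dataset("Muennighoff/humaneval-x-bugs", "python", split="test")

row = ds[0]
# Fields added by this commit (all declared as datasets.Value("string")):
print(row["signature"])
print(row["docstring"])
print(row["instruction"])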