tianbaoxiexxx committed

Commit: fade991
Parent(s): c691da3

Fix bugs in msr_sqa dataset (#3715)

* Fix problems in msr_sqa
* Update metadata JSON
* Update version
* Update dummy data version
* Update metadata JSON
Co-authored-by: Tianbao Xie <[email protected]>
Co-authored-by: Albert Villanova del Moral <[email protected]>
Commit from https://github.com/huggingface/datasets/commit/55924c5e3b823a3b1206269bb0892cd3a9508570
- dataset_infos.json +1 -1
- dummy/{0.0.0 → 1.0.0}/dummy_data.zip +2 -2
- msr_sqa.py +24 -8
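
After this fix, the dataset loads under script version 1.0.0 with a new validation split and a new question_and_history feature. A minimal sketch of inspecting the updated fields (assumes the datasets library with this revision of the script; field shapes follow the feature definitions in the diffs below):

from datasets import load_dataset

# Load the train split of SQA (script version 1.0.0 after this commit).
dataset = load_dataset("msr_sqa", split="train")

example = dataset[0]
print(example["question"])              # the current question in the sequence
print(example["question_and_history"])  # all questions in the sequence up to and including this one
# A Sequence of a dict is stored as a dict of lists in datasets:
print(example["answer_coordinates"])    # {"row_index": [...], "column_index": [...]}
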
dataset_infos.json CHANGED
@@ -1 +1 @@
-{"default": {"description": "Recent work in semantic parsing for question answering has focused on long and complicated questions, many of which would seem unnatural if asked in a normal conversation between two humans. In an effort to explore a conversational QA setting, we present a more realistic task: answering sequences of simple but inter-related questions. We created SQA by asking crowdsourced workers to decompose 2,022 questions from WikiTableQuestions (WTQ), which contains highly-compositional questions about tables from Wikipedia. We had three workers decompose each WTQ question, resulting in a dataset of 6,066 sequences that contain 17,553 questions in total. Each question is also associated with answers in the form of cell locations in the tables.\n", "citation": "@inproceedings{iyyer2017search,\n title={Search-based neural structured learning for sequential question answering},\n author={Iyyer, Mohit and Yih, Wen-tau and Chang, Ming-Wei},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n pages={1821--1831},\n year={2017}\n}\n", "homepage": "https://msropendata.com/datasets/b25190ed-0f59-47b1-9211-5962858142c2", "license": "Microsoft Research Data License Agreement", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "annotator": {"dtype": "int32", "id": null, "_type": "Value"}, "position": {"dtype": "int32", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "table_file": {"dtype": "string", "id": null, "_type": "Value"}, "table_header": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "table_data": {"feature": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "answer_coordinates": {"feature": {"dtype": "
+{"default": {"description": "Recent work in semantic parsing for question answering has focused on long and complicated questions, many of which would seem unnatural if asked in a normal conversation between two humans. In an effort to explore a conversational QA setting, we present a more realistic task: answering sequences of simple but inter-related questions. We created SQA by asking crowdsourced workers to decompose 2,022 questions from WikiTableQuestions (WTQ), which contains highly-compositional questions about tables from Wikipedia. We had three workers decompose each WTQ question, resulting in a dataset of 6,066 sequences that contain 17,553 questions in total. Each question is also associated with answers in the form of cell locations in the tables.\n", "citation": "@inproceedings{iyyer2017search,\n title={Search-based neural structured learning for sequential question answering},\n author={Iyyer, Mohit and Yih, Wen-tau and Chang, Ming-Wei},\n booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n pages={1821--1831},\n year={2017}\n}\n", "homepage": "https://msropendata.com/datasets/b25190ed-0f59-47b1-9211-5962858142c2", "license": "Microsoft Research Data License Agreement", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "annotator": {"dtype": "int32", "id": null, "_type": "Value"}, "position": {"dtype": "int32", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_and_history": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "table_file": {"dtype": "string", "id": null, "_type": "Value"}, "table_header": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "table_data": {"feature": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "answer_coordinates": {"feature": {"row_index": {"dtype": "int32", "id": null, "_type": "Value"}, "column_index": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answer_text": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "msr_sqa", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 19732499, "num_examples": 12276, "dataset_name": "msr_sqa"}, "validation": {"name": "validation", "num_bytes": 3738331, "num_examples": 2265, "dataset_name": "msr_sqa"}, "test": {"name": "test", "num_bytes": 5105873, "num_examples": 3012, "dataset_name": "msr_sqa"}}, "download_checksums": {"https://download.microsoft.com/download/1/D/C/1DC270D2-1B53-4A61-A2E3-88AB3E4E6E1F/SQA%20Release%201.0.zip": {"num_bytes": 4796932, "checksum": "791a07ef90d6e736c186b25009d3c10cb38624b879bb668033445a3ab8892f64"}}, "download_size": 4796932, "post_processing_size": null, "dataset_size": 28576703, "size_in_bytes": 33373635}}
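
For reference, the two substantive schema changes in the metadata above, rendered by hand as datasets feature definitions (not itself part of the commit):

import datasets

# New in 1.0.0: the running question history attached to each example.
question_and_history = datasets.features.Sequence(datasets.Value("string"))

# "answer_coordinates" is now a sequence of structured row/column pairs
# rather than a sequence of plain values.
answer_coordinates = datasets.features.Sequence(
    {
        "row_index": datasets.Value("int32"),
        "column_index": datasets.Value("int32"),
    }
)
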
dummy/{0.0.0 → 1.0.0}/dummy_data.zip RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:81f4cf20ef83ae9682c85ccd9e54aafd0c10dc785980f3a844d0c59b120bc525
+size 445705
msr_sqa.py CHANGED
@@ -19,10 +19,11 @@ import ast
 import csv
 import os
 
+import pandas as pd
+
 import datasets
 
 
-# TODO: Add BibTeX citation
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
 @inproceedings{iyyer2017search,
@@ -60,13 +61,16 @@ def _load_table_data(table_file):
 
     Returns:
         header: a list of headers in the table.
-
+        rows: 2d array of data in the table.
     """
-
-
-
-
-
+    rows = []
+    table_data = pd.read_csv(table_file)
+    # the first line is header
+    header = list(table_data.columns)
+    for row_data in table_data.values:
+        rows.append([str(_) for _ in list(row_data)])
+
+    return header, rows
 
 
 def _parse_answer_coordinates(answer_coordinate_str):
@@ -113,6 +117,8 @@ def _parse_answer_text(answer_text_str):
 class MsrSQA(datasets.GeneratorBasedBuilder):
     """Microsoft Research Sequential Question Answering (SQA) Dataset"""
 
+    VERSION = datasets.Version("1.0.0")
+
     def _info(self):
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
@@ -122,6 +128,7 @@ class MsrSQA(datasets.GeneratorBasedBuilder):
                     "annotator": datasets.Value("int32"),
                     "position": datasets.Value("int32"),
                     "question": datasets.Value("string"),
+                    "question_and_history": datasets.Sequence(datasets.Value("string")),
                     "table_file": datasets.Value("string"),
                     "table_header": datasets.features.Sequence(datasets.Value("string")),
                     "table_data": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
@@ -143,7 +150,11 @@ class MsrSQA(datasets.GeneratorBasedBuilder):
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"filepath": os.path.join(data_dir, "train.tsv"), "data_dir": data_dir},
+                gen_kwargs={"filepath": os.path.join(data_dir, "random-split-1-train.tsv"), "data_dir": data_dir},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={"filepath": os.path.join(data_dir, "random-split-1-dev.tsv"), "data_dir": data_dir},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
@@ -155,10 +166,15 @@ class MsrSQA(datasets.GeneratorBasedBuilder):
         """Yields examples."""
         with open(filepath, encoding="utf-8") as f:
             reader = csv.DictReader(f, delimiter="\t")
+            question_and_history = []
             for idx, item in enumerate(reader):
                 item["answer_text"] = _parse_answer_text(item["answer_text"])
                 item["answer_coordinates"] = _parse_answer_coordinates(item["answer_coordinates"])
                 header, table_data = _load_table_data(os.path.join(data_dir, item["table_file"]))
                 item["table_header"] = header
                 item["table_data"] = table_data
+                if item["position"] == "0":
+                    question_and_history = []  # reset history
+                question_and_history.append(item["question"])
+                item["question_and_history"] = question_and_history
                 yield idx, item
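
To make the two behavioral changes above concrete, a small self-contained sketch on made-up data (not part of the commit): table files are now parsed with pandas, whose CSV reader honors quoted fields, and each example carries the running question history, reset whenever position returns to "0".

import io

import pandas as pd


def load_table_data(table_file):
    """Same logic as the updated _load_table_data above."""
    rows = []
    table_data = pd.read_csv(table_file)
    header = list(table_data.columns)
    for row_data in table_data.values:
        rows.append([str(_) for _ in list(row_data)])
    return header, rows


# A quoted cell containing a comma survives pd.read_csv, where a naive
# line.split(",") would split it in two.
csv_text = 'Name,Notes\nAlice,"likes tea, and coffee"\nBob,climbs\n'
header, rows = load_table_data(io.StringIO(csv_text))
print(header)  # ['Name', 'Notes']
print(rows)    # [['Alice', 'likes tea, and coffee'], ['Bob', 'climbs']]

# The history logic from _generate_examples, run on made-up rows.
items = [
    {"position": "0", "question": "who are the players?"},
    {"position": "1", "question": "which of them are from spain?"},
    {"position": "0", "question": "what are the teams?"},
]
question_and_history = []
for item in items:
    if item["position"] == "0":
        question_and_history = []  # reset history at each new sequence
    question_and_history.append(item["question"])
    # Copy so each demo item keeps its own snapshot; the script above
    # assigns the shared list directly.
    item["question_and_history"] = list(question_and_history)
print(items[1]["question_and_history"])
# ['who are the players?', 'which of them are from spain?']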