glecorve committed
Commit bf9e693
Parent: 5aedbf2

Inflate JSON dataset

Files changed (6)
  1. .gitattributes +3 -0
  2. README.md +48 -0
  3. json/test.json +3 -0
  4. json/train.json +3 -0
  5. json/valid.json +3 -0
  6. paraqa-sparqltotext.py +111 -0
.gitattributes CHANGED
@@ -53,3 +53,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ json/test.json filter=lfs diff=lfs merge=lfs -text
+ json/train.json filter=lfs diff=lfs merge=lfs -text
+ json/valid.json filter=lfs diff=lfs merge=lfs -text
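These three rules extend the repository's existing Git LFS configuration so that the new split files are stored as LFS objects rather than plain Git blobs; they are exactly what running `git lfs track` on each path (e.g. `git lfs track "json/test.json"`) appends to `.gitattributes`.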
README.md ADDED
@@ -0,0 +1,48 @@
+ ---
+ tags:
+ - qa
+ - sparql
+ language:
+ - en
+ access: private
+ dataset_info:
+   features:
+   - name: uid
+     dtype: string
+   - name: query
+     dtype: string
+   - name: question
+     dtype: string
+   - name: simplified_query
+     dtype: string
+   - name: answer
+     dtype: string
+   - name: verbalized_answer
+     dtype: string
+   - name: verbalized_answer_2
+     dtype: string
+   - name: verbalized_answer_3
+     dtype: string
+   - name: verbalized_answer_4
+     dtype: string
+   - name: verbalized_answer_5
+     dtype: string
+   - name: verbalized_answer_6
+     dtype: string
+   - name: verbalized_answer_7
+     dtype: string
+   - name: verbalized_answer_8
+     dtype: string
+   splits:
+   - name: train
+     num_bytes: 2540548
+     num_examples: 3500
+   - name: validation
+     num_bytes: 369571
+     num_examples: 500
+   - name: test
+     num_bytes: 722302
+     num_examples: 1000
+   download_size: 5412519
+   dataset_size: 3632421
+ ---
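The YAML block above is the dataset card metadata the Hub uses to describe the features and splits. A minimal loading sketch follows; the repository id `glecorve/paraqa-sparqltotext` is an assumption inferred from the committer and script names, not something the diff confirms.

```python
from datasets import load_dataset

# Hypothetical repo id -- inferred from the committer/script names above.
ds = load_dataset("glecorve/paraqa-sparqltotext")

# Expected splits per the metadata: train (3500), validation (500), test (1000).
print(ds)
print(ds["train"][0]["question"])
```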
json/test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dcdfa54a2eeeba051356fd4d8a9df24697e5df2dbb5e1145a49a070cba5bcb9a
+ size 1078328
json/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c4311726b0c60a05d3c5e3663841036aec6881430c8ff81f1c82896c395b1715
+ size 3786616
json/valid.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:426b1e0ac7acdcbda0ba331e8877f80e7f605e99aaea3c2384ca99b23aea3433
+ size 547575
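The three JSON entries above are Git LFS pointer stubs, not the data itself: each records only the spec version, the SHA-256 of the real blob, and its byte size (the three sizes sum to 5412519 bytes, matching the `download_size` declared in the README). A plain clone without LFS yields these stubs; `git lfs pull` fetches the actual files. A Python alternative, assuming the same hypothetical repo id as above:

```python
from huggingface_hub import hf_hub_download

# hf_hub_download resolves the LFS pointer and returns a local path to the
# real JSON file. The repo id is hypothetical (see the note above).
path = hf_hub_download(
    repo_id="glecorve/paraqa-sparqltotext",
    repo_type="dataset",
    filename="json/train.json",
)
print(path)
```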
paraqa-sparqltotext.py ADDED
@@ -0,0 +1,111 @@
+ import json
+
+ import datasets
+
+ _CITATION = """\
+ @inproceedings{lecorve2022sparql2text,
+   title={SPARQL-to-Text Question Generation for Knowledge-Based Conversational Applications},
+   author={Lecorv\'e, Gw\'enol\'e and Veyret, Morgan and Brabant, Quentin and Rojas-Barahona, Lina M.},
+   booktitle={Proceedings of the Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the International Joint Conference on Natural Language Processing (AACL-IJCNLP)},
+   year={2022}
+ }
+ """
+
+ _HOMEPAGE = ""
+
+ # The data files are hosted in this repository as Git LFS objects.
+ _URLS = {
+     "train": "json/train.json",
+     "valid": "json/valid.json",
+     "test": "json/test.json"
+ }
+
+ _DESCRIPTION = """\
+ Special version of ParaQA for the SPARQL-to-Text task
+ """
+
+
+ class ParaQA_SPARQL2Text(datasets.GeneratorBasedBuilder):
+     """
+     ParaQA-SPARQL2Text: Special version of ParaQA for the SPARQL-to-Text task
+     """
+
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "uid": datasets.Value("string"),
+                     "query": datasets.Value("string"),
+                     "question": datasets.Value("string"),
+                     "simplified_query": datasets.Value("string"),
+                     "answer": datasets.Value("string"),
+                     "verbalized_answer": datasets.Value("string"),
+                     "verbalized_answer_2": datasets.Value("string"),
+                     "verbalized_answer_3": datasets.Value("string"),
+                     "verbalized_answer_4": datasets.Value("string"),
+                     "verbalized_answer_5": datasets.Value("string"),
+                     "verbalized_answer_6": datasets.Value("string"),
+                     "verbalized_answer_7": datasets.Value("string"),
+                     "verbalized_answer_8": datasets.Value("string")
+                 }
+             ),
+             # (input, target) pair used when as_supervised=True is passed
+             # to builder.as_dataset
+             supervised_keys=("simplified_query", "question"),
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # The JSON files are plain data files rather than archives, so the
+         # paths returned by download_and_extract can be used directly.
+         paths = dl_manager.download_and_extract(_URLS)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": paths["train"], "split": "train"}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": paths["valid"], "split": "valid"}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": paths["test"], "split": "test"}
+             )
+         ]
+
+     def _generate_examples(self, filepath, split):
+         """Yields (key, example) tuples."""
+
+         def transform_sample(original_sample):
+             # Ensure every declared feature is present: samples with fewer
+             # verbalized answers keep empty strings for the missing fields.
+             transformed_sample = {
+                 "uid": "",
+                 "query": "",
+                 "question": "",
+                 "simplified_query": "",
+                 "answer": "",
+                 "verbalized_answer": "",
+                 "verbalized_answer_2": "",
+                 "verbalized_answer_3": "",
+                 "verbalized_answer_4": "",
+                 "verbalized_answer_5": "",
+                 "verbalized_answer_6": "",
+                 "verbalized_answer_7": "",
+                 "verbalized_answer_8": ""
+             }
+             transformed_sample.update(original_sample)
+             return transformed_sample
+
+         print("Opening %s" % filepath)
+         with open(filepath, "r", encoding="utf-8") as f:
+             data = json.load(f)
+         for key, sample in enumerate(data):
+             yield key, transform_sample(sample)
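For completeness, a usage sketch for the loading script itself, run from a local clone (the LFS-backed JSON files must be materialized first, e.g. via `git lfs pull`; a `datasets` version that still supports script-based loaders is assumed):

```python
from datasets import load_dataset

# Load via the local script; the relative _URLS paths resolve against the
# directory containing the script.
ds = load_dataset("./paraqa-sparqltotext.py")

# Peek at the (input, target) pair declared by supervised_keys.
ex = ds["train"][0]
print(ex["simplified_query"], "->", ex["question"])
```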