albertvillanova (HF staff) committed
Commit b023a2b (1 parent: 01fdde2)

Delete loading script

Files changed (1)
  parsinlu_reading_comprehension.py (+0, -141)
parsinlu_reading_comprehension.py DELETED
@@ -1,141 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """ParsiNLU Persian reading comprehension task"""
-
-
- import json
-
- import datasets
-
-
- logger = datasets.logging.get_logger(__name__)
-
- _CITATION = """\
- @article{huggingface:dataset,
-     title = {ParsiNLU: A Suite of Language Understanding Challenges for Persian},
-     author = {Khashabi, Daniel and Cohan, Arman and Shakeri, Siamak and Hosseini, Pedram and Pezeshkpour, Pouya and Alikhani, Malihe and Aminnaseri, Moin and Bitaab, Marzieh and Brahman, Faeze and Ghazarian, Sarik and others},
-     year = {2020},
-     journal = {arXiv e-prints},
-     eprint = {2012.06154},
- }
- """
-
- _DESCRIPTION = """\
- A Persian reading comprehension task (generating an answer, given a question and a context paragraph).
- The questions are mined using Google auto-complete; their answers and the corresponding evidence documents are manually annotated by native speakers.
- """
-
- _HOMEPAGE = "https://github.com/persiannlp/parsinlu/"
-
- _LICENSE = "CC BY-NC-SA 4.0"
-
- _URL = "https://raw.githubusercontent.com/persiannlp/parsinlu/master/data/reading_comprehension/"
- _URLs = {
-     "train": _URL + "train.jsonl",
-     "dev": _URL + "dev.jsonl",
-     "test": _URL + "eval.jsonl",
- }
-
-
- class ParsinluReadingComprehension(datasets.GeneratorBasedBuilder):
-     """ParsiNLU Persian reading comprehension task."""
-
-     VERSION = datasets.Version("1.0.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name="parsinlu-repo", version=VERSION, description="ParsiNLU repository: reading-comprehension"
-         ),
-     ]
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "question": datasets.Value("string"),
-                 "url": datasets.Value("string"),
-                 "context": datasets.Value("string"),
-                 "answers": datasets.features.Sequence(
-                     {
-                         "answer_start": datasets.Value("int32"),
-                         "answer_text": datasets.Value("string"),
-                     }
-                 ),
-             }
-         )
-
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types.
-             features=features,
-             # If there's a common (input, target) tuple from the features,
-             # specify them here. They'll be used if as_supervised=True in
-             # builder.as_dataset.
-             supervised_keys=None,
-             # Homepage of the dataset for documentation.
-             homepage=_HOMEPAGE,
-             # License for the dataset if available.
-             license=_LICENSE,
-             # Citation for the dataset.
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         data_dir = dl_manager.download_and_extract(_URLs)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples.
-                 gen_kwargs={
-                     "filepath": data_dir["train"],
-                     "split": "train",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 # These kwargs will be passed to _generate_examples.
-                 gen_kwargs={"filepath": data_dir["test"], "split": "test"},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 # These kwargs will be passed to _generate_examples.
-                 gen_kwargs={
-                     "filepath": data_dir["dev"],
-                     "split": "dev",
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filepath, split):
-         logger.info("generating examples from = %s", filepath)
-
-         def get_answer_index(passage, answer):
-             return passage.index(answer) if answer in passage else -1
-
-         with open(filepath, encoding="utf-8") as f:
-             for id_, row in enumerate(f):
-                 data = json.loads(row)
-                 answer = data["answers"]
-                 if isinstance(answer[0], str):
-                     # Answers given as plain strings: locate each one in the passage.
-                     answer = [{"answer_start": get_answer_index(data["passage"], x), "answer_text": x} for x in answer]
-                 else:
-                     # Answers given as (start, text) pairs.
-                     answer = [{"answer_start": x[0], "answer_text": x[1]} for x in answer]
-                 yield id_, {
-                     "question": data["question"],
-                     "url": str(data["url"]),
-                     "context": data["passage"],
-                     "answers": answer,
-                 }
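
With the loading script removed, the dataset is presumably served directly from data files hosted on the Hub, so it can be loaded without executing any repository code. A minimal sketch of post-deletion usage, assuming the hosting repo id is persiannlp/parsinlu_reading_comprehension (the commit itself does not name it):

from datasets import load_dataset

# Assumed repo id; adjust to the actual dataset repository on the Hub.
ds = load_dataset("persiannlp/parsinlu_reading_comprehension")

# The features should match the deleted script: question, url, context,
# and a sequence of answers with answer_start / answer_text fields
# (returned by the datasets library as a dict of lists).
example = ds["train"][0]
print(example["question"])
print(example["answers"])

Loading from data files rather than a script also means recent versions of the datasets library no longer need to be told to trust and run remote code for this dataset.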