hojjat-m commited on
Commit
5ff936f
1 Parent(s): 6ca3547

Create parsinlu-multiple-choice.py

Browse files
Files changed (1) hide show
  1. parsinlu-multiple-choice.py +118 -0
parsinlu-multiple-choice.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""ParsiNLU Persian multiple choice task (Mostly copy-paste from https://huggingface.co/datasets/persiannlp/parsinlu_reading_comprehension/blob/main/parsinlu_reading_comprehension.py)"""


from __future__ import absolute_import, division, print_function

import json

import datasets


# Module-level logger following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)

# BibTeX citation for the ParsiNLU paper.
# FIX: added the missing comma after year={2020}; without it the BibTeX
# entry is malformed.
_CITATION = """\
@article{huggingface:dataset,
title = {ParsiNLU: A Suite of Language Understanding Challenges for Persian},
authors = {Khashabi, Daniel and Cohan, Arman and Shakeri, Siamak and Hosseini, Pedram and Pezeshkpour, Pouya and Alikhani, Malihe and Aminnaseri, Moin and Bitaab, Marzieh and Brahman, Faeze and Ghazarian, Sarik and others},
year={2020},
journal = {arXiv e-prints},
eprint = {2012.06154},
}
"""

# You can copy an official description
_DESCRIPTION = """A Persian multiple choice task."""

_HOMEPAGE = "https://github.com/persiannlp/parsinlu/"

_LICENSE = "CC BY-NC-SA 4.0"

# The three splits are hosted as JSONL files in the ParsiNLU GitHub repo.
_URL = "https://raw.githubusercontent.com/persiannlp/parsinlu/master/data/multiple_choice/"
_URLs = {
    "train": _URL + "train.jsonl",
    "val": _URL + "valid.jsonl",
    "test": _URL + "test.jsonl",
}
36
+
37
+
38
class ParsinluMultipleChoice(datasets.GeneratorBasedBuilder):
    """ParsiNLU Persian multiple choice task."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="parsinlu-repo", version=VERSION, description="ParsiNLU repository: multiple choice"
        ),
    ]

    def _info(self):
        """Declare the dataset schema (features) and its metadata."""
        features = datasets.Features(
            {
                # Integer index of the correct option within `candidates`.
                "answer": datasets.Value("int64"),
                # FIX: the original wrote a bare `Value(...)`, which is a
                # NameError at runtime — it must be qualified as
                # `datasets.Value`.
                "candidates": datasets.Sequence(datasets.Value("string")),
                "category": datasets.Value("string"),
                "question": datasets.Value("string"),
                "id": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three JSONL splits and wire them to generators."""
        data_dir = dl_manager.download_and_extract(_URLs)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={"filepath": data_dir["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={"filepath": data_dir["test"], "split": "test"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={"filepath": data_dir["val"], "split": "validation"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs from one JSONL split file."""
        logger.info("generating examples from = %s", filepath)

        def get_answer_index(passage, answer):
            # Position of `answer` within the candidate list, -1 if absent.
            return passage.index(answer) if answer in passage else -1

        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                # BUG FIX: the original emitted data["question"] as the
                # answer (copy-paste error — the question string was stored
                # twice), even though the feature is declared int64.
                # Accept either a numeric answer index or the answer text,
                # resolving text to its index in `candidates` via the
                # previously-unused helper above.
                # NOTE(review): assumes the raw "answer" field is an index
                # or a candidate string — confirm against the published
                # JSONL files.
                raw_answer = data["answer"]
                try:
                    answer = int(raw_answer)
                except (TypeError, ValueError):
                    answer = get_answer_index(data["candidates"], raw_answer)
                yield id_, {
                    "answer": answer,
                    "candidates": data["candidates"],
                    "category": data["category"],
                    "question": data["question"],
                    "id": data["id"],
                }