hofarah committed on
Commit
96a383c
1 Parent(s): 1faadc5

Create long-summarization-persian.py

Browse files
Files changed (1) hide show
  1. long-summarization-persian.py +71 -0
long-summarization-persian.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import csv
17
+ import json
18
+ import os
19
+
20
+ import datasets
21
+
22
+
23
+ _DESCRIPTION = """\
24
+ This new dataset is designed to solve persian long summarization tasks.
25
+ """
26
+
27
+ _URLS = {
28
+ "train": "https://huggingface.co/datasets/zedfum/long-summarization-persian/blob/main/train.csv",
29
+ "validation": "https://huggingface.co/datasets/zedfum/long-summarization-persian/blob/main/validation.csv",
30
+ "test": "https://huggingface.co/datasets/zedfum/long-summarization-persian/blob/main/test.csv",
31
+ }
32
+
33
class NewDataset(datasets.GeneratorBasedBuilder):
    """Builder for a Persian long-document summarization dataset.

    Each example is a row of a per-split CSV file with three string
    columns: ``id``, ``article`` (the full document) and ``summary``.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return dataset metadata: description and the example schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "article": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                }
            ),
        )

    def _split_generators(self, dl_manager):
        """Download the per-split CSV files and declare the three splits."""
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["validation"]},
            ),
        ]

    def _generate_examples(self, filepath, split=None):
        """Yield ``(key, example)`` pairs from one split's CSV file.

        BUG FIX: the original body called ``pd.read_csv`` although pandas was
        never imported (NameError at generation time), and required a
        ``split`` argument that no ``gen_kwargs`` above supplies (TypeError).
        This version uses the stdlib ``csv`` module (already imported at the
        top of the file) and makes ``split`` optional for backward
        compatibility.

        Args:
            filepath: Local path to the downloaded CSV file for one split.
            split: Unused; kept optional so callers passing it still work.

        Yields:
            Tuples of ``(row_index, {"id", "article", "summary"})``.
        """
        # newline="" is the documented way to open files for the csv module.
        with open(filepath, encoding="utf-8", newline="") as f:
            reader = csv.DictReader(f)
            for idx, row in enumerate(reader):
                yield idx, {
                    "id": row["id"],
                    "article": row["article"],
                    "summary": row["summary"],
                }