hofarah committed
Commit
b2a1325
1 Parent(s): 25d9daa

Delete long-summarization-persian.py

Files changed (1)
  1. long-summarization-persian.py +0 -92
long-summarization-persian.py DELETED
@@ -1,92 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
-
- import csv
- import os
-
- import datasets
-
-
- _DESCRIPTION = """\
- This new dataset is designed to solve Persian long summarization tasks.
- """
-
- _URL = "./"
-
- class long_summarization_persianConfig(datasets.BuilderConfig):
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for long_summarization_persian.
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(long_summarization_persianConfig, self).__init__(**kwargs)
-
- class long_summarization_persian(datasets.GeneratorBasedBuilder):
-
-     BUILDER_CONFIGS = [
-         long_summarization_persianConfig(
-             name="long_summarization_persian",
-             version=datasets.Version("1.0.0"),
-             description="long_summarization_persian dataset",
-         ),
-     ]
-
-     def _info(self):
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "article": datasets.Value("string"),
-                     "summary": datasets.Value("string")
-                 }
-             ),
-         )
-
-     def _split_generators(self, dl_manager):
-         urls_to_download = {
-             "train": f"{_URL}train.csv",
-             "test": f"{_URL}test.csv",
-             "validation": f"{_URL}validation.csv",
-         }
-         downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={"filepath": downloaded_files["validation"]},
-             ),
-         ]
-
-
-     def _generate_examples(self, filepath):
-         """This function returns the examples in the raw (text) form."""
-         with open(filepath, encoding="utf-8") as f:
-             data = csv.DictReader(f)
-             # Each CSV row carries the article text and its reference summary.
-             for id_, row in enumerate(data):
-                 yield id_, {
-                     "id": str(id_),
-                     "article": row["article"],
-                     "summary": row["summary"],
-                 }
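
For reference, a builder script like this one is consumed through the standard datasets loading API. The snippet below is only a minimal sketch of that usage: the repository id hofarah/long-summarization-persian is an assumption (adjust it to the dataset's real Hub path), and recent datasets releases may also require trust_remote_code=True when loading script-based datasets.

from datasets import load_dataset

# Hypothetical repository id; replace with the actual Hub path of this dataset.
dataset = load_dataset("hofarah/long-summarization-persian")

print(dataset)                           # DatasetDict with train / test / validation splits
print(dataset["train"][0]["summary"])    # first reference summary in the train split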