import json
import csv
import datasets
import requests
import os

_CITATION = """\
@article{ParsBERT,
    title={ParsBERT: Transformer-based Model for Persian Language Understanding},
    author={Mehrdad Farahani and Mohammad Gharachorloo and Marzieh Farahani and Mohammad Manthouri},
    journal={ArXiv},
    year={2020},
    volume={abs/2005.12515}
}
"""
_DESCRIPTION = """\
A dataset of various news articles scraped from different online news agencies’ websites. The total number of articles is 16,438, spread over eight different classes.
"""

_DRIVE_URL = "https://drive.google.com/uc?export=download&id=1B6xotfXCcW9xS1mYSBQos7OCg0ratzKC"

class PersianNewsConfig(datasets.BuilderConfig):
    """BuilderConfig for PersianNews Dataset."""
    def __init__(self, **kwargs):
        super(PersianNewsConfig, self).__init__(**kwargs)


class PersianNews(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        PersianNewsConfig(name="Persian News", version=datasets.Version("1.0.0"), description="Persian text-classification dataset of news articles scraped from online news agencies' websites"),
    ]
    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # datasets.features.FeatureConnectors
            features=datasets.Features(
                {
                    "content": datasets.Value("string"),
                    "label": datasets.Value("string"),
                    "label_id": datasets.Value(dtype='int64')
                }
            ),
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage="https://hooshvare.github.io/docs/datasets/tc#persian-news",
            citation=_CITATION,
        )

    def custom_dataset(self, src_url, dest_path):
        """Download the dataset archive from _DRIVE_URL to `dest_path`.

        Passed to `dl_manager.download_custom`, which supplies the
        destination path inside the datasets cache directory.
        """
        response = requests.get(src_url)
        response.raise_for_status()

        with open(dest_path, "wb") as f:
            f.write(response.content)

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # dl_manager is a datasets.download.DownloadManager that can be used to
        # download and extract URLs
    
        downloaded_file = dl_manager.download_custom(_DRIVE_URL, self.custom_dataset)
        extracted_file = dl_manager.extract(downloaded_file)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(extracted_file, 'persian_news/train.csv')}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(extracted_file, 'persian_news/test.csv')}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(extracted_file, 'persian_news/dev.csv')}),
        ]

    def _generate_examples(self, filepath):
        """Yields examples from a tab-separated file with a header row."""
        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t")
            for idx, row in enumerate(reader):
                yield idx, {
                    "content": row["content"],
                    "label": row["label"],
                    # Cast to int to match the declared int64 feature.
                    "label_id": int(row["label_id"]),
                }
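

# A minimal usage sketch, assuming this script is saved locally as
# `persian_news.py` (the file name and the printed fields below are
# illustrative, not part of the builder itself).
if __name__ == "__main__":
    from datasets import load_dataset

    # Running the builder downloads the archive, extracts it, and parses
    # the three tab-separated splits defined in _split_generators.
    dataset = load_dataset("persian_news.py")
    print(dataset)              # DatasetDict with train/test/validation splits
    print(dataset["train"][0])  # {"content": ..., "label": ..., "label_id": ...}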