ShixuanAn commited on
Commit
f5000d3
1 Parent(s): a9da7cf

Upload hugging_face.py

Browse files
Files changed (1) hide show
  1. hugging_face.py +106 -0
hugging_face.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # TODO: Address all TODOs and remove all explanatory comments
2
+ """TODO: Add a description here."""
3
+
4
+ import csv
5
+ import json
6
+ import os
7
+ from typing import List
8
+ import datasets
9
+ import logging
10
+
11
+ # TODO: Add BibTeX citation
12
+ # Find for instance the citation on arxiv or on the dataset repo/website
13
+ _CITATION = """\
14
+ @InProceedings{huggingface:dataset,
15
+ title = {A great new dataset},
16
+ author={Shixuan An
17
+ },
18
+ year={2024}
19
+ }
20
+ """
21
+
22
+ # TODO: Add description of the dataset here
23
+ # You can copy an official description
24
+ _DESCRIPTION = """\
25
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
26
+ """
27
+
28
+ # TODO: Add a link to an official homepage for the dataset here
29
+ _HOMEPAGE = ""
30
+
31
+ # TODO: Add the licence for the dataset here if you can find it
32
+ _LICENSE = ""
33
+
34
+ # TODO: Add link to the official dataset URLs here
35
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
36
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
37
+ _URL = "https://data.mendeley.com/datasets/5ty2wb6gvg/1"
38
+ _URLS = {
39
+ "train": _URL + "train-v1.1.json",
40
+ "dev": _URL + "dev-v1.1.json",
41
+ }
42
+
43
+
44
# Dataset builder name matches the script name with CamelCase instead of snake_case.
class SquadDataset(datasets.GeneratorBasedBuilder):
    """SQuAD-format extractive question-answering dataset builder.

    Each example pairs a context paragraph with a question and its span
    answers (answer text plus character start offsets within the context).
    """

    _URLS = _URLS
    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return the dataset metadata: feature schema, homepage and citation.

        Returns:
            datasets.DatasetInfo describing the example features.
        """
        # BUG FIX: the original body started with `raise ValueError('woops!')`,
        # which made the return below unreachable and every load of this
        # dataset fail immediately.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    # A question may have several reference answers: parallel
                    # sequences of answer text and character start offsets.
                    "answers": datasets.features.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "answer_start": datasets.Value("int32"),
                        }
                    ),
                }
            ),
            # No default supervised_keys (as we have to pass both question
            # and context as input).
            supervised_keys=None,
            homepage="https://rajpurkar.github.io/SQuAD-explorer/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the raw JSON files and map them onto the train/validation splits.

        Args:
            dl_manager: download manager used to fetch (and cache) the URLs.

        Returns:
            One SplitGenerator per split, each pointing `_generate_examples`
            at the local path of its downloaded file.
        """
        downloaded_files = dl_manager.download_and_extract(self._URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield `(id, example)` pairs from one SQuAD-format JSON file.

        Args:
            filepath: local path to a JSON file with the SQuAD layout
                (`data -> paragraphs -> qas -> answers`).

        Yields:
            Tuples of the question id and a dict matching the feature schema
            declared in `_info`.
        """
        logging.info("generating examples from = %s", filepath)
        # Explicit encoding: SQuAD-style files are UTF-8; relying on the
        # platform default encoding is an easy way to break on Windows.
        with open(filepath, encoding="utf-8") as f:
            squad = json.load(f)
        for article in squad["data"]:
            # Title may be absent in some files; fall back to an empty string.
            title = article.get("title", "").strip()
            for paragraph in article["paragraphs"]:
                context = paragraph["context"].strip()
                for qa in paragraph["qas"]:
                    question = qa["question"].strip()
                    id_ = qa["id"]

                    answer_starts = [answer["answer_start"] for answer in qa["answers"]]
                    answers = [answer["text"].strip() for answer in qa["answers"]]

                    # Features currently used are "context", "question", and "answers".
                    # Others are extracted here for the ease of future expansions.
                    yield id_, {
                        "title": title,
                        "context": context,
                        "question": question,
                        "id": id_,
                        "answers": {"answer_start": answer_starts, "text": answers},
                    }