Datasets:

Modalities:
Text
Formats:
parquet
Languages:
English
ArXiv:
Libraries:
Datasets
pandas
License:
albertvillanova HF staff committed on
Commit
685b95a
1 Parent(s): 329d529

Convert dataset to Parquet (#4)

Browse files

- Convert dataset to Parquet (ee0c881f8236471e62927615e5c880c79b1b5fb6)
- Delete loading script (874f0483927306e1ab13c53a024182d8176047c6)

README.md CHANGED
@@ -32,18 +32,28 @@ dataset_info:
32
  '0': negative
33
  '1': positive
34
  '2': no_impact
 
35
  splits:
36
  - name: train
37
- num_bytes: 48555
38
  num_examples: 892
39
  - name: validation
40
- num_bytes: 5788
41
  num_examples: 105
42
  - name: test
43
- num_bytes: 5588
44
  num_examples: 104
45
- download_size: 49870
46
- dataset_size: 59931
 
 
 
 
 
 
 
 
 
47
  train-eval-index:
48
  - config: default
49
  task: text-classification
 
32
  '0': negative
33
  '1': positive
34
  '2': no_impact
35
+ '3': mixed
36
  splits:
37
  - name: train
38
+ num_bytes: 48551
39
  num_examples: 892
40
  - name: validation
41
+ num_bytes: 5784
42
  num_examples: 105
43
  - name: test
44
+ num_bytes: 5584
45
  num_examples: 104
46
+ download_size: 48150
47
+ dataset_size: 59919
48
+ configs:
49
+ - config_name: default
50
+ data_files:
51
+ - split: train
52
+ path: data/train-*
53
+ - split: validation
54
+ path: data/validation-*
55
+ - split: test
56
+ path: data/test-*
57
  train-eval-index:
58
  - config: default
59
  task: text-classification
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8d641bfc593a0a35b8af376f76fab573953476eb8affc91d1ae07c16d8479ac0
3
+ size 6161
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ca6372cf331a5db4de59d7a4de11710315c84866a868e65ae05eb4964465737
3
+ size 35646
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:97408b0166f4a456289d2d8ab8429bcac5c47b921d14dc92ecdad8b59732abb3
3
+ size 6343
poem_sentiment.py DELETED
@@ -1,84 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """Poem Sentiment: A sentiment dataset of poem verses"""
16
-
17
-
18
- import datasets
19
-
20
-
21
- _CITATION = """\
22
- @misc{sheng2020investigating,
23
- title={Investigating Societal Biases in a Poetry Composition System},
24
- author={Emily Sheng and David Uthus},
25
- year={2020},
26
- eprint={2011.02686},
27
- archivePrefix={arXiv},
28
- primaryClass={cs.CL}
29
- }
30
- """
31
-
32
- _DESCRIPTION = """\
33
- Poem Sentiment is a sentiment dataset of poem verses from Project Gutenberg. \
34
- This dataset can be used for tasks such as sentiment classification or style transfer for poems.
35
- """
36
-
37
-
38
- _HOMEPAGE = "https://github.com/google-research-datasets/poem-sentiment"
39
-
40
- _BASE_URL = "https://raw.githubusercontent.com/google-research-datasets/poem-sentiment/master/data/"
41
- _URLS = {
42
- "train": f"{_BASE_URL}/train.tsv",
43
- "dev": f"{_BASE_URL}/dev.tsv",
44
- "test": f"{_BASE_URL}/test.tsv",
45
- }
46
- _LABEL_MAPPING = {-1: 0, 0: 2, 1: 1, 2: 3}
47
-
48
-
49
- class PoemSentiment(datasets.GeneratorBasedBuilder):
50
- """Poem Sentiment: A sentiment dataset of poem verses"""
51
-
52
- VERSION = datasets.Version("1.0.0")
53
-
54
- def _info(self):
55
- return datasets.DatasetInfo(
56
- description=_DESCRIPTION,
57
- features=datasets.Features(
58
- {
59
- "id": datasets.Value("int32"),
60
- "verse_text": datasets.Value("string"),
61
- "label": datasets.ClassLabel(names=["negative", "positive", "no_impact", "mixed"]),
62
- }
63
- ),
64
- supervised_keys=None,
65
- homepage=_HOMEPAGE,
66
- citation=_CITATION,
67
- )
68
-
69
- def _split_generators(self, dl_manager):
70
- downloaded_files = dl_manager.download(_URLS)
71
- return [
72
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
73
- datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
74
- datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
75
- ]
76
-
77
- def _generate_examples(self, filepath):
78
- with open(filepath, encoding="utf-8") as f:
79
- lines = f.readlines()
80
- for line in lines:
81
- fields = line.strip().split("\t")
82
- idx, verse_text, label = fields
83
- label = _LABEL_MAPPING[int(label)]
84
- yield int(idx), {"id": int(idx), "verse_text": verse_text, "label": label}