# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
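"""Loading script for the Multilingual Sentiments benchmark: sentiment
classification data labeled positive/neutral/negative, available per language
(twelve languages) or combined under the "all" configuration."""
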
import textwrap

import datasets
import pandas as pd

# The twelve languages covered by the per-language configs.
LANGUAGES = [
    "malay", "hindi", "japanese", "german",
    "italian", "english", "portuguese", "french",
    "spanish", "chinese", "indonesian", "arabic",
]


class MultilingualSentimentsConfig(datasets.BuilderConfig):
    """BuilderConfig for Multilingual Sentiments."""

    def __init__(
        self,
        text_features,
        label_column,
        label_classes,
        train_url,
        valid_url,
        test_url,
        citation,
        **kwargs,
    ):
        """BuilderConfig for Multilingual Sentiments.

        Args:
            text_features: `dict[string, string]`, map from the name of each text
                feature to the name of the corresponding column in the csv file.
            label_column: `string`, name of the column in the csv file
                corresponding to the label.
            label_classes: `list[string]`, the list of classes if the label is
                categorical.
            train_url: `string`, url to download the train file from.
            valid_url: `string`, url to download the validation file from.
            test_url: `string`, url to download the test file from.
            citation: `string`, citation for the dataset.
            **kwargs: keyword arguments forwarded to super.
        """
        super(MultilingualSentimentsConfig, self).__init__(
            version=datasets.Version("1.0.0", ""), **kwargs)
        self.text_features = text_features
        self.label_column = label_column
        self.label_classes = label_classes
        self.train_url = train_url
        self.valid_url = valid_url
        self.test_url = test_url
        self.citation = citation


class MultilingualSentiments(datasets.GeneratorBasedBuilder):
    """Multilingual Sentiments benchmark."""

    BUILDER_CONFIGS = []

    # The "all" config combines every language and therefore carries an extra
    # "language" text feature identifying each example's origin.
    BUILDER_CONFIGS.append(
        MultilingualSentimentsConfig(
            name="all",
            description=textwrap.dedent(
                """\
                All datasets."""
            ),
            text_features={"text": "text", "source": "source", "language": "language"},
            label_classes=["positive", "neutral", "negative"],
            label_column="label",
            train_url="https://raw.githubusercontent.com/tyqiangz/multilingual-sentiment-datasets/main/data/all/train.csv",
            valid_url="https://raw.githubusercontent.com/tyqiangz/multilingual-sentiment-datasets/main/data/all/valid.csv",
            test_url="https://raw.githubusercontent.com/tyqiangz/multilingual-sentiment-datasets/main/data/all/test.csv",
            citation=textwrap.dedent(
                """\
                All citation"""
            ),
        ),
    )

    # One config per language, each pointing at that language's CSV splits.
    for lang in LANGUAGES:
        BUILDER_CONFIGS.append(
            MultilingualSentimentsConfig(
                name=lang,
                description=textwrap.dedent(
                    f"""\
                    {lang} dataset."""
                ),
                text_features={"text": "text", "source": "source"},
                label_classes=["positive", "neutral", "negative"],
                label_column="label",
                train_url=f"https://raw.githubusercontent.com/tyqiangz/multilingual-sentiment-datasets/main/data/{lang}/train.csv",
                valid_url=f"https://raw.githubusercontent.com/tyqiangz/multilingual-sentiment-datasets/main/data/{lang}/valid.csv",
                test_url=f"https://raw.githubusercontent.com/tyqiangz/multilingual-sentiment-datasets/main/data/{lang}/test.csv",
                citation=textwrap.dedent(
                    f"""\
                    {lang} citation"""
                ),
            ),
        )

    def _info(self):
        # Every text feature is a plain string column; the label is a
        # categorical ClassLabel over positive/neutral/negative.
        features = {text_feature: datasets.Value("string")
                    for text_feature in self.config.text_features}
        features["label"] = datasets.features.ClassLabel(
            names=self.config.label_classes)
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        train_path = dl_manager.download_and_extract(self.config.train_url)
        valid_path = dl_manager.download_and_extract(self.config.valid_url)
        test_path = dl_manager.download_and_extract(self.config.test_url)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={"filepath": train_path}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION,
                                    gen_kwargs={"filepath": valid_path}),
            datasets.SplitGenerator(name=datasets.Split.TEST,
                                    gen_kwargs={"filepath": test_path}),
        ]

    def _generate_examples(self, filepath):
        """Yields (key, example) tuples read from a CSV split file."""
        df = pd.read_csv(filepath)
        for id_, row in df.iterrows():
            example = {
                "text": row["text"],
                "label": row["label"],
                "source": row["source"],
            }
            # Only the "all" config carries a "language" column identifying
            # which per-language dataset each example came from.
            if self.config.name == "all":
                example["language"] = row["language"]
            yield id_, example