|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""The TID-8 (The Inherent-Disagreement-8 datasets) benchmark""" |
|
|
|
|
|
import json |
|
import os |
|
|
|
import datasets |
|
|
|
|
|
# BibTeX citation for the TID-8 benchmark paper itself; it is appended to
# every per-dataset citation when building DatasetInfo (see TID8Glue._info).
_TID_8_CITATION = """\

@inproceedings{deng2023tid8,

  title={You Are What You Annotate: Towards Better Models through Annotator Representations},

  author={Deng, Naihao and Liu, Siyang and Zhang, Frederick Xinliang and Wu, Winston and Wang, Lu and Mihalcea, Rada},

  booktitle={Findings of EMNLP 2023},

  year={2023}

}

Note that each TID-8 dataset has its own citation. Please see the source to

get the correct citation for each contained dataset.

"""



# High-level benchmark blurb prepended to every config's DatasetInfo description.
_TID_8_DESCRIPTION = """\

TID-8 is a new benchmark focused on the task of letting models learn from data that has inherent disagreement.

"""
|
|
|
# Per-dataset blurbs surfaced in each config's DatasetInfo description.
# The hard line wraps (including hyphenated breaks such as "pejora-\ntively")
# are preserved verbatim from the source text; they are part of the runtime
# strings, so do not reflow them.
_FIA_DESCRIPTION = """\

Friends QIA (Damgaard et al., 2021) is a

corpus of classifying indirect answers to polar questions."""



_PEJ_DESCRIPTION = """\

Pejorative (Dinu et al., 2021) classifies

whether Tweets contain words that are used pejora-

tively. By definition, pejorative words are words or

phrases that have negative connotations or that are

intended to disparage or belittle."""



_HSB_DESCRIPTION = """\

HS-Brexit (Akhtar et al., 2021) is an abu-

sive language detection corpus on Brexit belonging

to two distinct groups: a target group of three Mus-

lim immigrants in the UK, and a control group of

three other individuals."""



_MDA_DESCRIPTION = """\

MultiDomain Agreement (Leonardelli

et al., 2021) is a hate speech classification dataset of

English tweets from three domains of Black Lives

Matter, Election, and Covid-19, with a particular

focus on tweets that potentially leads to disagree-

ment."""



_GOE_DESCRIPTION = """\

Go Emotions (Demszky et al., 2020) is a

fine-grained emotion classification corpus of care-

fully curated comments extracted from Reddit. We

group emotions into four categories following sen-

timent level divides in the original paper."""



_HUM_DESCRIPTION = """\

Humor (Simpson et al., 2019) is a corpus

of online texts for pairwise humorousness compari-

son"""



_COM_DESCRIPTION = """\

CommitmentBank (De Marneffe et al.,

2019) is an NLI dataset. It contains naturally oc-

curring discourses whose final sentence contains

a clause-embedding predicate under an entailment

canceling operator (question, modal, negation, an-

tecedent of conditional)."""



_SNT_DESCRIPTION = """\

Sentiment Analysis (Díaz et al., 2018) is a

sentiment classification dataset originally used to

detect age-related sentiments."""
|
|
|
# Explanation of the "-ann" (annotation) split scheme; appended to the
# DatasetInfo description of every "*-ann" config (see TID8Glue._info).
_ANNOTATION_SPLIT_DESCRIPTION = """\

Annotation Split:

We split the annotations for each annotator into train and test set.



In other words, the same set of annotators appear in both train, (val),

and test sets.



For datasets that have splits originally, we follow the original split and remove

datapoints in test sets that are annotated by an annotator who is not in

the training set.



For datasets that do not have splits originally, we split the data into

train and test set for convenience, you may further split the train set

into a train and val set.

"""



# Explanation of the "-atr" (annotator) split scheme; appended to the
# DatasetInfo description of every "*-atr" config (see TID8Glue._info).
_ANNOTATOR_SPLIT_DESCRIPTION = """\

Annotator Split:

We split annotators into train and test set.



In other words, a different set of annotators would appear in train and test sets.



We split the data into train and test set for convenience, you may consider

further splitting the train set into a train and val set for performance validation.

"""
|
|
|
|
|
# Citations for the individual source datasets. Each config's DatasetInfo
# citation is the dataset-specific entry followed by _TID_8_CITATION.
_FIA_CITATION = """\

@inproceedings{damgaard-etal-2021-ill,

    title = "{``}{I}{'}ll be there for you{''}: The One with Understanding Indirect Answers",

    author = "Damgaard, Cathrine and

      Toborek, Paulina and

      Eriksen, Trine and

      Plank, Barbara",

    booktitle = "Proceedings of the 2nd Workshop on Computational Approaches to Discourse",

    month = nov,

    year = "2021",

    address = "Punta Cana, Dominican Republic and Online",

    publisher = "Association for Computational Linguistics",

    url = "https://aclanthology.org/2021.codi-main.1",

    doi = "10.18653/v1/2021.codi-main.1",

    pages = "1--11",

}"""



_PEJ_CITATION = """\

@inproceedings{dinu-etal-2021-computational-exploration,

    title = "A Computational Exploration of Pejorative Language in Social Media",

    author = "Dinu, Liviu P. and

      Iordache, Ioan-Bogdan and

      Uban, Ana Sabina and

      Zampieri, Marcos",

    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021",

    month = nov,

    year = "2021",

    address = "Punta Cana, Dominican Republic",

    publisher = "Association for Computational Linguistics",

    url = "https://aclanthology.org/2021.findings-emnlp.296",

    doi = "10.18653/v1/2021.findings-emnlp.296",

    pages = "3493--3498"

}"""



_HSB_CITATION = """\

@article{akhtar2021whose,

  title={Whose opinions matter? perspective-aware models to identify opinions of hate speech victims in abusive language detection},

  author={Akhtar, Sohail and Basile, Valerio and Patti, Viviana},

  journal={arXiv preprint arXiv:2106.15896},

  year={2021}

}"""
|
|
|
_MDA_CITATION = """\ |
|
@inproceedings{leonardelli-etal-2021-agreeing, |
|
title = "Agreeing to Disagree: Annotating Offensive Language Datasets with Annotators{'} Disagreement", |
|
author = "Leonardelli, Elisa and. Menini, Stefano and |
|
Palmero Aprosio, Alessio and |
|
Guerini, Marco and |
|
Tonelli, Sara", |
|
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing", |
|
month = nov, |
|
year = "2021", |
|
address = "Online and Punta Cana, Dominican Republic", |
|
publisher = "Association for Computational Linguistics", |
|
url = "https://aclanthology.org/2021.emnlp-main.822", |
|
pages = "10528--10539", |
|
}""" |
|
|
|
# Citations for the remaining TID-8 datasets (GoEmotions, Humor,
# CommitmentBank, Sentiment Analysis). The LaTeX escapes ({\^a}, {\'\i})
# are BibTeX accents and intentionally left unescaped for Python.
_GOE_CITATION = """\

@inproceedings{demszky-etal-2020-goemotions,

    title = "{G}o{E}motions: A Dataset of Fine-Grained Emotions",

    author = "Demszky, Dorottya and

      Movshovitz-Attias, Dana and

      Ko, Jeongwoo and

      Cowen, Alan and

      Nemade, Gaurav and

      Ravi, Sujith",

    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",

    month = jul,

    year = "2020",

    address = "Online",

    publisher = "Association for Computational Linguistics",

    url = "https://aclanthology.org/2020.acl-main.372",

    doi = "10.18653/v1/2020.acl-main.372",

    pages = "4040--4054"

}"""



_HUM_CITATION = """\

@inproceedings{simpson-etal-2019-predicting,

    title = "Predicting Humorousness and Metaphor Novelty with {G}aussian Process Preference Learning",

    author = "Simpson, Edwin and

      Do Dinh, Erik-L{\^a}n and

      Miller, Tristan and

      Gurevych, Iryna",

    booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",

    month = jul,

    year = "2019",

    address = "Florence, Italy",

    publisher = "Association for Computational Linguistics",

    url = "https://aclanthology.org/P19-1572",

    doi = "10.18653/v1/P19-1572",

    pages = "5716--5728"

}"""



_COM_CITATION = """\

@inproceedings{de2019commitmentbank,

  title={The commitmentbank: Investigating projection in naturally occurring discourse},

  author={De Marneffe, Marie-Catherine and Simons, Mandy and Tonhauser, Judith},

  booktitle={proceedings of Sinn und Bedeutung},

  volume={23},

  number={2},

  pages={107--124},

  year={2019}

}"""



_SNT_CITATION = """\

@inproceedings{diaz2018addressing,

  title={Addressing age-related bias in sentiment analysis},

  author={D{\'\i}az, Mark and Johnson, Isaac and Lazar, Amanda and Piper, Anne Marie and Gergle, Darren},

  booktitle={Proceedings of the 2018 chi conference on human factors in computing systems},

  pages={1--14},

  year={2018}

}"""
|
|
|
|
|
|
|
class TID8Config(datasets.BuilderConfig):
    """BuilderConfig for one TID-8 dataset/split-scheme combination."""

    def __init__(self, features, data_url, citation, url,
                 label_classes=("False", "True"), task=None, **kwargs):
        """BuilderConfig for TID-8.

        Args:
          features: `list[string]`, list of the features that will appear in the
            feature dict. Should not include "label".
          data_url: `string`, url to download the zip file from.
          citation: `string`, citation for the data set.
          url: `string`, url for information about the data set.
          label_classes: `list[string]`, the list of classes for the label if the
            label is present as a string. Non-string labels will be cast to either
            'False' or 'True'.
          task: `string`, key of the per-row answer field in the jsonl data
            (e.g. "emotion", "humor"); used by the builder to fill the
            "answer"/"answer_label" columns.
          **kwargs: keyword arguments forwarded to super.
        """
        # Zero-argument super() — the explicit (class, self) form is legacy
        # Python 2 style; the file already relies on Python 3 (f-strings).
        super().__init__(version=datasets.Version("1.0.3"), **kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url
        self.task = task
|
|
|
# Root of the preprocessed TID-8 data hosted on GitHub; each config downloads
# "<BASE_URL>/<config name>.zip".
BASE_URL = "https://raw.githubusercontent.com/MichiganNLP/tid8-dataset/main/huggingface-data"
|
|
|
class TID8Glue(datasets.GeneratorBasedBuilder):
    """Builder for the TID-8 benchmark.

    Every contained dataset is exposed under two configs:
      * ``<name>-ann`` (annotation split): each annotator's annotations are
        divided between train and test, so the same annotators appear in all
        splits (see _ANNOTATION_SPLIT_DESCRIPTION).
      * ``<name>-atr`` (annotator split): the annotators themselves are
        divided, so train and test contain disjoint annotator sets (see
        _ANNOTATOR_SPLIT_DESCRIPTION).
    """

    # One TID8Config per (dataset, split scheme) pair. The "-ann" and "-atr"
    # variants of a dataset differ only in name/data_url, so both are generated
    # from a single table instead of duplicating every config by hand.
    # Tuple layout: (base name, description, feature columns, label classes,
    # citation, homepage URL, task key in the jsonl rows).
    BUILDER_CONFIGS = [
        TID8Config(
            name=f"{base}-{suffix}",
            description=descr,
            features=feats,
            label_classes=labels,
            data_url=f"{BASE_URL}/{base}-{suffix}.zip",
            citation=cite,
            url=url,
            task=task,
        )
        for suffix in ("ann", "atr")
        for base, descr, feats, labels, cite, url, task in [
            (
                "friends_qia",
                _FIA_DESCRIPTION,
                ["Season", "Episode", "Category", "Q_person", "A_person",
                 "Q_original", "Q_modified", "A_modified", "Annotation_1",
                 "Annotation_2", "Annotation_3", "Goldstandard"],
                ["1", "2", "3", "4", "5"],
                _FIA_CITATION,
                "https://github.com/friendsQIA/Friends_QIA",
                "indirect_ans",
            ),
            (
                "pejorative",
                _PEJ_DESCRIPTION,
                ["pejor_word", "word_definition", "annotator-1",
                 "annotator-2", "annotator-3"],
                ["pejorative", "non-pejorative", "undecided"],
                _PEJ_CITATION,
                "https://nlp.unibuc.ro/resources.html",
                "pejorative",
            ),
            (
                "hs_brexit",
                _HSB_DESCRIPTION,
                ["other annotations"],
                ["hate_speech", "not_hate_speech"],
                _HSB_CITATION,
                "https://le-wi-di.github.io/",
                "hs_brexit",
            ),
            (
                "md-agreement",
                _MDA_DESCRIPTION,
                ["task", "original_id", "domain"],
                ["offensive_speech", "not_offensive_speech"],
                _MDA_CITATION,
                "https://le-wi-di.github.io/",
                "offensive",
            ),
            (
                "goemotions",
                _GOE_DESCRIPTION,
                ["author", "subreddit", "link_id", "parent_id", "created_utc",
                 "rater_id", "example_very_unclear", "admiration", "amusement",
                 "anger", "annoyance", "approval", "caring", "confusion",
                 "curiosity", "desire", "disappointment", "disapproval",
                 "disgust", "embarrassment", "excitement", "fear", "gratitude",
                 "grief", "joy", "love", "nervousness", "optimism", "pride",
                 "realization", "relief", "remorse", "sadness", "surprise",
                 "neutral"],
                ["positive", "ambiguous", "negative", "neutral"],
                _GOE_CITATION,
                "https://github.com/google-research/google-research/tree/master/goemotions",
                "emotion",
            ),
            (
                "humor",
                _HUM_DESCRIPTION,
                ["text_a", "text_b"],
                ["B", "X", "A"],
                _HUM_CITATION,
                "https://github.com/ukplab/acl2019-GPPL-humour-metaphor",
                "humor",
            ),
            (
                "commitmentbank",
                _COM_DESCRIPTION,
                ["HitID", "Verb", "Context", "Prompt", "Target", "ModalType",
                 "Embedding", "MatTense", "weak_labels"],
                ["0", "1", "2", "3", "-3", "-1", "-2"],
                _COM_CITATION,
                "https://github.com/mcdm/CommitmentBank",
                "certainty",
            ),
            (
                "sentiment",
                _SNT_DESCRIPTION,
                [],
                ["Neutral", "Somewhat positive", "Very negative",
                 "Somewhat negative", "Very positive"],
                _SNT_CITATION,
                "https://dataverse.harvard.edu/dataverse/algorithm-age-bias",
                "sentiment",
            ),
        ]
    ]

    def _info(self):
        """Build the DatasetInfo for the selected config."""
        name = self.config.name
        features = {}
        for feature in self.config.features:
            # Multi-valued columns: CommitmentBank carries a list of weak
            # labels and HS-Brexit a list of the other annotators' judgements.
            # Fix: the original tested `"hate_speech_brexit" in name`, which
            # never matches the actual config names "hs_brexit-ann"/"-atr",
            # so "other annotations" was silently typed as a plain string.
            if ("commitmentbank" in name and feature == "weak_labels") or (
                "hs_brexit" in name and feature == "other annotations"
            ):
                features[feature] = datasets.features.Sequence(datasets.Value("string"))
            else:
                features[feature] = datasets.Value("string")

        # Columns shared by every TID-8 dataset (see _generate_examples).
        features["question"] = datasets.Value("string")
        features["uid"] = datasets.Value("string")
        features["id"] = datasets.Value("int32")
        features["annotator_id"] = datasets.Value("string")
        features["answer"] = datasets.Value("string")
        features["answer_label"] = datasets.features.ClassLabel(names=self.config.label_classes)

        # Append the split-scheme explanation matching the config suffix.
        if name.endswith("-ann"):
            additional_split_descr = _ANNOTATION_SPLIT_DESCRIPTION
        else:
            assert name.endswith("-atr")
            additional_split_descr = _ANNOTATOR_SPLIT_DESCRIPTION
        return datasets.DatasetInfo(
            description=_TID_8_DESCRIPTION + "\n" + self.config.description + "\n" + additional_split_descr,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + _TID_8_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the config's zip and declare its splits."""
        dl_dir = dl_manager.download_and_extract(self.config.data_url) or ""

        splits = []
        # Only these configs ship a dedicated dev set; the rest are
        # train/test only. Fix: the original listed
        # "multi-domain-agreement-ann", which matches no config (the config
        # is named "md-agreement-ann"), so its dev.jsonl was never loaded.
        # NOTE(review): confirm md-agreement-ann.zip actually contains
        # dev.jsonl before release.
        if self.config.name in {"friends_qia-ann", "md-agreement-ann"}:
            splits.append(
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, self.config.name, "dev.jsonl"),
                        "split": datasets.Split.VALIDATION,
                    },
                ),
            )
        splits.extend([
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, self.config.name, "train.jsonl"),
                    "split": datasets.Split.TRAIN,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, self.config.name, "test.jsonl"),
                    "split": datasets.Split.TEST,
                },
            ),
        ])
        return splits

    def _generate_examples(self, data_file, split):
        """Yield (key, example) pairs from one jsonl split file."""
        with open(data_file, encoding="utf-8") as f:
            for i, line in enumerate(f):
                row = json.loads(line)
                example = {
                    "id": row["id"],
                    "uid": row["uid"],
                    # The raw per-annotator answer doubles as the ClassLabel value.
                    "answer": row[self.config.task],
                    "answer_label": row[self.config.task],
                    "annotator_id": row["respondent_id"],
                    "question": row["sentence"],
                }
                for feature in self.config.features:
                    try:
                        example[feature] = row[feature]
                    except KeyError:
                        # Best-effort: a row missing an optional feature column
                        # is reported but still yielded (original behavior,
                        # narrowed from a broad `except Exception`).
                        print(row)
                yield i, example