added hate labels
Files changed:
- .gitignore +1 -0
- NOTE.md +1 -1
- data/tweet_nerd/test.jsonl +0 -0
- data/tweet_nerd/train.jsonl +0 -0
- get_stats.py +4 -0
- process/tweet_sentiment.py +0 -1
- super_tweet_eval.py +7 -11
.gitignore CHANGED
@@ -1 +1,2 @@
 misc
+dimos.ipynb
NOTE.md CHANGED
@@ -4,4 +4,4 @@
 - TweetNER: [link](https://huggingface.co/datasets/tner/tweetner7)
 - TweetQA: [link](https://huggingface.co/datasets/tweet_qa), [link2](https://huggingface.co/datasets/lmqg/qg_tweetqa)
 - TweetIntimacy: [link]()
-
+- TweetSentiment: [link](https://alt.qcri.org/semeval2017/task4/index.php?id=results)
data/tweet_nerd/test.jsonl ADDED
The diff for this file is too large to render.

data/tweet_nerd/train.jsonl ADDED
The diff for this file is too large to render.
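The new tweet_nerd split files are too large for the diff view. A quick, hypothetical way to inspect them locally, assuming the usual one-JSON-object-per-line layout (field names are not visible in this diff, so none are assumed):

```python
import json

# Peek at the first record of the newly added train split.
with open("data/tweet_nerd/train.jsonl", encoding="utf-8") as f:
    first = json.loads(next(f))
print(sorted(first.keys()))
```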
get_stats.py CHANGED
@@ -9,6 +9,10 @@ task_description = {
     'tweet_similarity': "regression on two texts",
     'tweet_topic': "multi-label classification",
     "tempo_wic": "binary classification on two texts"
+    "tweet_sentiment": "ABSA on a five-point scale"
+    "tweet_hate": "multi-class classification"
+    "tweet_emoji": "multi-class classification"
+    "tweet_nerd": "binary classification"
 }
 for task in task_description.keys():
     data = load_dataset("cardiffnlp/super_tweet_eval", task)
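As shown in the hunk, the added dictionary entries have no comma separators, so the literal is not valid Python as rendered. A minimal sketch of the extended stats loop, with commas added and assuming every listed config name resolves on the hub:

```python
from datasets import load_dataset

task_description = {
    "tweet_similarity": "regression on two texts",
    "tweet_topic": "multi-label classification",
    "tempo_wic": "binary classification on two texts",
    "tweet_sentiment": "ABSA on a five-point scale",
    "tweet_hate": "multi-class classification",
    "tweet_emoji": "multi-class classification",
    "tweet_nerd": "binary classification",
}

for task, description in task_description.items():
    # Each key is a config name of the cardiffnlp/super_tweet_eval dataset.
    data = load_dataset("cardiffnlp/super_tweet_eval", task)
    sizes = {split: len(data[split]) for split in data}
    print(f"{task} ({description}): {sizes}")
```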
process/tweet_sentiment.py CHANGED
@@ -1,4 +1,3 @@
-# Original data: https://alt.qcri.org/semeval2017/task4/index.php?id=results
 import pandas as pd
 from glob import glob
 import urllib
super_tweet_eval.py CHANGED
@@ -2,7 +2,7 @@
 import json
 import datasets
 
-_VERSION = "0.1.
+_VERSION = "0.1.33"
 _SUPER_TWEET_EVAL_CITATION = """TBA"""
 _SUPER_TWEET_EVAL_DESCRIPTION = """TBA"""
 _TWEET_TOPIC_DESCRIPTION = """
@@ -279,11 +279,11 @@ class SuperTweetEval(datasets.GeneratorBasedBuilder):
             features["text_1_tokenized"] = datasets.Sequence(datasets.Value("string"))
             features["text_2_tokenized"] = datasets.Sequence(datasets.Value("string"))
         if self.config.name == "tweet_hate":
-            features["gold_label"] = datasets.Value("int32")
+            label_classes = [
+                'hate_gender','hate_race', 'hate_sexuality', 'hate_religion','hate_origin', 'hate_disability',
+                'target_age', 'not_hate']
+            features['gold_label'] = datasets.features.ClassLabel(names=self.config.label_classes)
+            #features["gold_label"] = datasets.Value("int32")
             features["text"] = datasets.Value("string")
         if self.config.name == "tweet_nerd":
             features['target'] = datasets.Value("string")
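For illustration, a hedged sketch of what the new feature type means: a ClassLabel keeps the eight label names alongside integer ids, where the plain int32 used before carried no names. The hunk builds a local label_classes list but passes self.config.label_classes to ClassLabel, so the names below are assumed to match the builder config.

```python
import datasets

# Assumed to mirror self.config.label_classes for the tweet_hate config.
label_classes = [
    'hate_gender', 'hate_race', 'hate_sexuality', 'hate_religion', 'hate_origin',
    'hate_disability', 'target_age', 'not_hate',
]

# ClassLabel stores gold_label as an integer index and keeps the name mapping.
gold_label = datasets.features.ClassLabel(names=label_classes)
print(gold_label.str2int('hate_gender'))  # 0
print(gold_label.int2str(7))              # 'not_hate'
```

On the consumer side the mapping is visible through the loaded features; a sketch, assuming the tweet_hate subset is served from the cardiffnlp/super_tweet_eval repo on the hub:

```python
from datasets import load_dataset

hate = load_dataset("cardiffnlp/super_tweet_eval", "tweet_hate")
gold = hate["train"].features["gold_label"]
print(gold.names)                                    # the eight class names
print(gold.int2str(hate["train"][0]["gold_label"]))  # label name of the first example
```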
@@ -308,11 +308,7 @@ class SuperTweetEval(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-
-        if self.config.name == 'tweet_nerd':
-            splits = ['validation']
-        else:
-            splits = ['train', 'test', 'validation']
+        splits = ['train', 'test', 'validation']
         downloaded_file = dl_manager.download_and_extract({s: f"{self.config.data_url}/{s}.jsonl" for s in splits})
         return [datasets.SplitGenerator(name=s, gen_kwargs={"filepath": downloaded_file[s]}) for s in splits]
 
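After this change every config downloads the same three splits; dl_manager.download_and_extract, given a dict of URLs, returns a dict of local paths keyed the same way, and each path reaches the example generator via gen_kwargs. A minimal, hypothetical _generate_examples matching those gen_kwargs (the actual generator lives elsewhere in super_tweet_eval.py and is not part of this diff):

```python
import json

def _generate_examples(filepath):
    # filepath is the local JSONL path passed through gen_kwargs above.
    with open(filepath, encoding="utf-8") as f:
        for idx, line in enumerate(f):
            yield idx, json.loads(line)
```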