added evaluation script/updated README
- README.md +30 -25
- evaluation.py +306 -0
- super_tweeteval.py +7 -8
README.md
CHANGED
@@ -41,20 +41,20 @@ More details on the task and an evaluation of language models can be found on th
All tasks provide custom training, validation and test splits.
- | task
- |
- | tweet_topic | multi-label classification | 4,585 / 573 / 1,679 |
- | tweet_ner7 | sequence labeling | 4,616 / 576 / 2,807 |
- | tweet_qa | generation | 9,489 / 1,086 / 1,203
- | tweet_qg | generation | 9,489 / 1,086 / 1,203
- | tweet_intimacy | regression on a single text | 1,191 / 396 / 396
- | tweet_similarity | regression on two texts | 450 / 100 / 450
- | tempo_wic | binary classification on two texts | 1,427 / 395 / 1,472 |
- | tweet_hate | multi-class classification | 5,019 / 716 / 1,433 |
- | tweet_emoji | multi-class classification | 50,000 / 5,000 / 50,000 |
- | tweet_sentiment | ABSA on a five-pointscale | 26,632 / 4,000 / 12,379
- | tweet_nerd | binary classification | 20,164 / 4,100 / 20,075
- | tweet_emotion | multi-label classification
## Dataset Structure
### Data Fields
@@ -134,26 +134,29 @@ In the following we present the information contained in each of the datasets.
## Evaluation Metrics
- - __tweet_ner7:__ ```span-F1```
- __tweet_emoji:__ ``` accuracy at top 5 ```
## Citation Information
@@ -289,7 +292,9 @@ N/A
```
- TweetEmoji
- ```
- TweetSentiment
```
All tasks provide custom training, validation and test splits.
+ | **task** | **dataset** | **load dataset** | **description** | **number of instances** |
+ |----------------------------|----------------|------------------|------------------------------------|-------------------------|
+ | Topic Classification | TWEETTOPIC | tweet_topic | multi-label classification | 4,585 / 573 / 1,679 |
+ | NER | TWEETNER7 | tweet_ner7 | sequence labeling | 4,616 / 576 / 2,807 |
+ | Question Answering | TWEETQA | tweet_qa | generation | 9,489 / 1,086 / 1,203 |
+ | Question Generation | TWEETQG | tweet_qg | generation | 9,489 / 1,086 / 1,203 |
+ | Intimacy Analysis | TWEETINTIMACY | tweet_intimacy | regression on a single text | 1,191 / 396 / 396 |
+ | Tweet Similarity | TWEETSIM | tweet_similarity | regression on two texts | 450 / 100 / 450 |
+ | Meaning Shift Detection | TEMPOWIC | tempo_wic | binary classification on two texts | 1,427 / 395 / 1,472 |
+ | Hate Speech Detection | TWEETHATE | tweet_hate | multi-class classification | 5,019 / 716 / 1,433 |
+ | Emoji Classification | TWEETEMOJI100 | tweet_emoji | multi-class classification | 50,000 / 5,000 / 50,000 |
+ | Sentiment Classification | TWEETSENTIMENT | tweet_sentiment | ABSA on a five-point scale | 26,632 / 4,000 / 12,379 |
+ | Named Entity Disambiguation | TWEETNERD | tweet_nerd | binary classification | 20,164 / 4,100 / 20,075 |
+ | Emotion Classification | TWEETEMOTION | tweet_emotion | multi-label classification | 6,838 / 886 / 3,259 |
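For orientation, a minimal usage sketch (not part of this commit): each entry in the `load dataset` column is the configuration name expected by `datasets.load_dataset`, mirroring the call in `evaluation.py` below (the dataset may require an access token, as suggested by `use_auth_token=True` in that script).

```python
from datasets import load_dataset

# Minimal sketch: load one SuperTweetEval task by the name in the "load dataset" column.
tweet_topic_test = load_dataset("cardiffnlp/super_tweeteval", "tweet_topic", split="test")

print(len(tweet_topic_test))      # 1,679 test instances, per the table above
print(tweet_topic_test.features)  # e.g. the multi-label field 'gold_label_list' used by evaluation.py
```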
## Dataset Structure
### Data Fields
## Evaluation Metrics
+ - __tweet_ner7:__ ```macro-F1```
+ - __tweet_emotion:__ ```macro-F1```
+ - __tweet_qg:__ ```METEOR```
+ - __tweet_nerd:__ ```accuracy```
+ - __tweet_sentiment:__ ```1 - MAE<sup>M</sup> (MAE<sup>M</sup>: Macro Averaged Mean Absolute Error)```
+ - __tempo_wic:__ ```accuracy```
- __tweet_emoji:__ ``` accuracy at top 5 ```
+ - __tweet_intimacy:__ ```spearman correlation```
+ - __tweet_qa:__ ```answer-F1```
+ - __tweet_topic:__ ```macro-F1```
+ - __tweet_hate:__ ```combined-F1 (micro-F1 for hate/not-hate & macro-F1 for hate speech subclasses)```
+ - __tweet_similarity:__ ```spearman correlation```
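To make the least standard entry above concrete, here is a minimal sketch (not part of this commit) of the `1 - MAE^M` score used for tweet_sentiment; it follows the logic in `evaluation.py` below and assumes integer labels on the five-point scale with `imbalanced-learn` installed. The gold/pred values are hypothetical.

```python
from imblearn.metrics import macro_averaged_mean_absolute_error

gold = [0, 1, 2, 3, 4, 2, 2]  # hypothetical gold labels on the five-point scale
pred = [0, 2, 2, 3, 3, 2, 1]  # hypothetical model predictions

# 1 - MAE^M, floored at -1 for the worst possible model, as in evaluation.py below
score = max(-1, 1 - macro_averaged_mean_absolute_error(gold, pred))
print(round(score, 4))
```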
## Citation Information
```
- TweetEmoji
+ ```
+ N/A
+ ```
- TweetSentiment
```
evaluation.py
ADDED
@@ -0,0 +1,306 @@
import logging
from datasets import load_dataset
from imblearn.metrics import macro_averaged_mean_absolute_error
from sklearn.metrics import f1_score
from evaluate import load
import numpy as np
import argparse
from collections import defaultdict


logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')

# arguments
parser = argparse.ArgumentParser(description='Super TweetEval evaluation script.')
parser.add_argument('-p', '--prediction-path', required=True, type=str,
                    help="path to a directory that contains one prediction file per task (one prediction per line of the test set)")
parser.add_argument('-o', '--output-file', default="super_tweeteval_result.json", type=str, help="path to the output file")
parser.add_argument('--t2t-format', action="store_false", default=True, help="format prediction file in T2T format (ONLY for NER7)")

opt = parser.parse_args()

task_names = ['tweet_topic', 'tweet_ner7', 'tweet_qa', 'tweet_qg',
              'tweet_intimacy', 'tweet_similarity', 'tempo_wic',
              'tweet_hate', 'tweet_nerd', 'tweet_emoji',
              'tweet_sentiment', 'tweet_emotion']

scores = defaultdict(lambda: 0)  # {k: 0 for k in task_names}
not_found = []

for task in task_names:
    # load dataset
    data = load_dataset("cardiffnlp/super_tweeteval", task, use_auth_token=True, split="test")
    try:
        if task == 'tempo_wic':
            label2id = {"no": 0, "yes": 1}

            with open(f"{opt.prediction_path}/tempo-wic.txt") as f:
                _predictions = []
                output = f.read().split('\n')
                for entry in output:
                    if entry in label2id:
                        _predictions.append(label2id[entry])
                    else:
                        _predictions.append(-1)

            gold_labels = data["gold_label_binary"]
            eval_metric = {"accuracy": np.mean([int(a == b) for a, b in zip(_predictions, gold_labels)])}
            scores[task] = eval_metric["accuracy"]
        elif task == "tweet_emoji":
            with open('./emoji_map.csv') as f:
                label_classes = f.readlines()
            label_names = [x.strip('\n') for x in label_classes]
            label_names = [x.split(',')[1] for x in label_names]

            with open(f"{opt.prediction_path}/tweet-emoji.txt") as f:
                lines = f.readlines()
            lines = [l.strip('\n') for l in lines]
            predictions = []

            for l in lines:
                pred_instance = []
                # consider only top 5 predictions
                top_predictions = l.split(',') if ',' in l else l.split(' ')
                for label in top_predictions[:5]:
                    label = label.strip(" ,")
                    if label in label_names:
                        pred_instance.append(label_names.index(label))
                    else:
                        pred_instance.append(-1)  # emoji not in label_names

                predictions.append(pred_instance)

            # metric: accuracy at top 5
            gold_labels = np.array(data["gold_label"][:40_000])
            eval_metric = {"accuracy_top5": np.mean([1 if gold_labels[i] in predictions[i] else 0 for i in range(len(gold_labels))])}
            scores[task] = eval_metric["accuracy_top5"]
        elif task == "tweet_emotion":
            label_names = data.features['gold_label_list'].feature.names

            with open(f"{opt.prediction_path}/tweet-emotion.txt") as f:
                lines = f.readlines()
            lines = [l.strip('\n') for l in lines]
            predictions = []
            for l in lines:
                pred_instance = [0] * len(label_names)
                for label in l.split(','):
                    label = label.strip(' ')
                    if label in label_names:
                        pred_instance[label_names.index(label)] = 1

                predictions.append(pred_instance)

            # metric
            gold_labels = data["gold_label_list"]
            eval_metric = {"macro_f1": f1_score(gold_labels, predictions, average='macro')}
            scores[task] = eval_metric["macro_f1"]
        elif task == "tweet_ner7":
            labels = [
                'B-corporation', 'B-creative_work', 'B-event', 'B-group', 'B-location', 'B-person', 'B-product',
                'I-corporation', 'I-creative_work', 'I-event', 'I-group', 'I-location', 'I-person', 'I-product', 'O'
            ]
            id2label = {i: label for i, label in enumerate(labels)}
            true_sequence = [[id2label[i] for i in ii] for ii in data['gold_label_sequence']]

            # metric
            metric = load("seqeval")
            if opt.t2t_format:
                # format prediction file in IOB sequence
                with open(f"{opt.prediction_path}/tweet-ner7.txt") as f:
                    lines = f.read().split("\n")
                output = [l.strip('\n') for l in lines]
                output = [list(set(i.split(","))) for i in output]
                prediction_sequence = []
                for d, o in zip(data, output):
                    tag_seq = ['O'] * len(d['text_tokenized'])
                    for _o in o:
                        if len(_o.split(":")) != 2:
                            continue
                        entity, _type = _o.split(":")
                        entity_tokens = entity.split(" ")
                        try:
                            i = d['text_tokenized'].index(entity_tokens[0])
                            tag_seq[i] = f"B-{_type.strip()}"
                            if len(entity_tokens) > 1:
                                for j in range(1, len(entity_tokens)):
                                    tag_seq[i + j] = f"I-{_type.strip()}"
                        except:
                            continue
                    prediction_sequence.append(tag_seq)
            else:
                with open(opt.prediction_path) as f:
                    prediction_sequence = [[id2label[j] if j in id2label else j for j in i.split('\t')] for i in f.read().split("\n")]

            eval_metric = metric.compute(predictions=prediction_sequence, references=true_sequence)
            eval_metric = {'overall_f1': eval_metric['overall_f1']}
            scores[task] = eval_metric['overall_f1']
        elif task == "tweet_hate":
            label_names = data.features['gold_label'].names

            with open(f"{opt.prediction_path}/tweet-hate.txt") as f:
                lines = f.readlines()
            output = [i.strip('\n') for i in lines]
            predictions = []
            for x in output:
                if x not in label_names:
                    predictions.append(-1)
                else:
                    predictions.append(label_names.index(x))
            gold_labels = data["gold_label"]
            # do not consider not_hate class
            f1_multi = f1_score(gold_labels, predictions, labels=list(range(7)), average='macro')

            # consider all hate subclasses as one class
            predictions_binary = [1 if x in list(range(7)) else 0 for x in predictions]
            gold_labels_binary = [1 if x in list(range(7)) else 0 for x in gold_labels]
            f1_binary = f1_score(gold_labels_binary, predictions_binary, average='micro')

            eval_metric = {"combined_f1": (f1_multi + f1_binary) / 2}
            scores[task] = eval_metric["combined_f1"]

        elif task == "tweet_intimacy":
            gold_labels = data["gold_score"]
            # mean_value to be used if model outputs a non-numeric value
            mean_value = sum(gold_labels) / len(gold_labels)

            # metric
            metric = load("spearmanr")
            with open(f"{opt.prediction_path}/tweet-intimacy.txt") as f:
                _predictions = []
                failed_predictions = 0
                lines = f.readlines()
                output = [l.strip('\n') for l in lines]
                for i in output:
                    try:
                        _predictions.append(float(i))
                    except ValueError:
                        _predictions.append(mean_value)
                        failed_predictions += 1

            corr_spear = metric.compute(predictions=_predictions, references=gold_labels)
            eval_metric = {"spearmanr": corr_spear}
            scores[task] = eval_metric["spearmanr"]['spearmanr']
        elif task == "tweet_nerd":
            # metric
            label2id = {"no": 0, "yes": 1}
            with open(f"{opt.prediction_path}/tweet-nerd.txt") as f:
                _predictions = []
                output = f.read().split('\n')
                output = [x.lower().strip() for x in output]
                for entry in output:
                    if entry in label2id:
                        _predictions.append(label2id[entry])
                    else:
                        _predictions.append(-1)

            gold_labels = data["gold_label_binary"]
            eval_metric = {"accuracy": np.mean([int(a == b) for a, b in zip(_predictions, gold_labels)])}
            scores[task] = eval_metric["accuracy"]
        elif task == "tweet_qa":
            metric = load("squad")
            with open(f"{opt.prediction_path}/tweet-qa.txt") as f:
                lines = f.readlines()
                output = [l.strip('\n') for l in lines]
                _predictions = [{"prediction_text": p, "id": str(_n)} for _n, p in enumerate(output)]

            _references = [{"answers": {"answer_start": [100], "text": [r["gold_label_str"]]}, "id": str(_n)} for _n, r in enumerate(data)]
            eval_metric = metric.compute(predictions=_predictions, references=_references)
            eval_metric.pop("exact_match")
            eval_metric["f1"] = eval_metric["f1"] / 100
            scores[task] = eval_metric["f1"]
        elif task == "tweet_qg":
            metric = load("meteor")
            with open(f"{opt.prediction_path}/tweet-qg.txt") as f:
                lines = f.readlines()
                _predictions = [l.strip('\n') for l in lines]
            _references = data["gold_label_str"]
            eval_metric = metric.compute(predictions=_predictions, references=_references)
            scores[task] = eval_metric["meteor"]
        elif task == "tweet_sentiment":
            label_names = data.features['gold_label'].names
            with open(f"{opt.prediction_path}/tweet-sentiment.txt") as f:
                lines = f.readlines()
                output = [l.strip('\n') for l in lines]
            predictions = []
            # if the model outputs a label that is not in the label set, we set the label to be "neutral or negative" (2)
            for x in output:
                x = x.strip(' ')
                if x not in label_names:
                    predictions.append(2)
                else:
                    predictions.append(label_names.index(x))

            # metric: 1 - macro averaged mean absolute error
            gold_labels = data["gold_label"]

            macro_mae = macro_averaged_mean_absolute_error(gold_labels, predictions)
            macro_mae = 1 - macro_mae
            # set a floor of -1 for worst model
            macro_mae = max([-1, macro_mae])
            eval_metric = {"macro_mae": macro_mae}
            scores[task] = eval_metric["macro_mae"]
        elif task == "tweet_similarity":
            gold_labels = data["gold_score"]
            # mean_value to be used if model outputs a non-numeric value
            mean_value = sum(gold_labels) / len(gold_labels)
            # metric
            metric = load("spearmanr")
            with open(f"{opt.prediction_path}/tweet-similarity.txt") as f:
                _predictions = []
                lines = f.readlines()
                output = [l.strip('\n') for l in lines]
                for i in output:
                    try:
                        _predictions.append(float(i))
                    except ValueError:
                        _predictions.append(mean_value)

            corr_spear = metric.compute(predictions=_predictions, references=gold_labels)
            eval_metric = {"spearmanr": corr_spear}
            scores[task] = eval_metric["spearmanr"]['spearmanr']
        elif task == "tweet_topic":
            label_names = data.features['gold_label_list'].feature.names

            with open(f"{opt.prediction_path}/tweet-topic.txt") as f:
                lines = f.readlines()
            lines = [l.strip('\n') for l in lines]
            predictions = []
            for l in lines:
                pred_instance = [0] * len(label_names)
                for label in l.split(','):
                    label = label.strip(' ')
                    if label in label_names:
                        pred_instance[label_names.index(label)] = 1

                predictions.append(pred_instance)

            # metric
            gold_labels = data["gold_label_list"]
            eval_metric = {"macro_f1": f1_score(gold_labels, predictions, average='macro')}
            scores[task] = eval_metric["macro_f1"]
    except FileNotFoundError:
        not_found.append(task)
        continue

# clusters/groups to evaluate
subgroups = {
    "temporal": ["tweet_ner7", "tempo_wic", "tweet_topic", "tweet_nerd"],
    "temporal**": ["tempo_wic", "tweet_topic", "tweet_nerd"],
    "multi-label": ["tweet_topic", "tweet_emotion"],
    "multi-class": ["tweet_sentiment", "tweet_hate"],
    "regression": ["tweet_similarity", "tweet_intimacy", "tweet_sentiment"],
    "target-based": ["tweet_sentiment", "tempo_wic", "tweet_nerd"],
    "big-label": ["tweet_emoji", "tweet_topic"],
    "disambiguation": ["tempo_wic", "tweet_nerd"],
    "generation": ["tweet_qa", "tweet_qg"],
}

scores = {k: round(v * 100, 2) for k, v in scores.items()}
logging.info(f"Tasks not found: {not_found}")
logging.info(f"Scores: {scores}")
logging.info(f"Average score: {np.mean(list(scores.values()))}")
for group in subgroups:
    logging.info(f"Average score {group}: {np.mean([scores[task] for task in subgroups[group] if task not in not_found])}")
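For context (a usage note, not part of the added file): the script expects one prediction file per task, named as in the `open()` calls above (e.g. `tweet-topic.txt`, `tweet-qa.txt`, `tempo-wic.txt`), inside the directory passed via `-p`/`--prediction-path`, plus an `emoji_map.csv` in the working directory for `tweet_emoji`. A typical invocation would be roughly `python evaluation.py -p <predictions_dir>`; tasks whose prediction file is missing are skipped and reported in the "Tasks not found" log line.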
super_tweeteval.py
CHANGED
@@ -341,14 +341,13 @@ class SuperTweetEval(datasets.GeneratorBasedBuilder):
features['date'] = datasets.Value("string")
if self.config.name == "tweet_emoji":
    # download class mapping
-     # names=label_classes)
features['gold_label'] = datasets.Value("int32")
features["text"] = datasets.Value("string")
if self.config.name == "tweet_sentiment":
features['date'] = datasets.Value("string")
if self.config.name == "tweet_emoji":
    # download class mapping
+     dl_manager = datasets.utils.download_manager.DownloadManager()
+     url_map = "https://huggingface.co/datasets/cardiffnlp/super_tweet_eval/resolve/main/data/tweet_emoji/map.txt"
+     with open(dl_manager.download(url_map)) as f:
+         label_classes = f.readlines()
+     label_classes = [x.strip('\n') for x in label_classes]
+     features['gold_label'] = datasets.features.ClassLabel(
+         names=label_classes)
features['gold_label'] = datasets.Value("int32")
features["text"] = datasets.Value("string")
if self.config.name == "tweet_sentiment":