Commit 9b98abe
Parent(s): d0b8175

Add offensive data files

Files changed:
- README.md (+13 -5)
- dataset_infos.json (+11 -44)
- offensive/test-00000-of-00001.parquet (+3 -0)
- offensive/train-00000-of-00001.parquet (+3 -0)
- offensive/validation-00000-of-00001.parquet (+3 -0)
README.md CHANGED

@@ -157,16 +157,16 @@ dataset_info:
           '1': offensive
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 1648061
     num_examples: 11916
   - name: test
-    num_bytes:
+    num_bytes: 135473
     num_examples: 860
   - name: validation
-    num_bytes:
+    num_bytes: 192417
     num_examples: 1324
-  download_size:
-  dataset_size:
+  download_size: 1234528
+  dataset_size: 1975951
 - config_name: sentiment
   features:
   - name: text

@@ -338,6 +338,14 @@ configs:
     path: irony/test-*
   - split: validation
     path: irony/validation-*
+- config_name: offensive
+  data_files:
+  - split: train
+    path: offensive/train-*
+  - split: test
+    path: offensive/test-*
+  - split: validation
+    path: offensive/validation-*
 train-eval-index:
 - config: emotion
   task: text-classification
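The new configs entry above is what lets the Hub resolve the offensive subset from the parquet shards by name. A minimal loading sketch with the datasets library, assuming the dataset id tweet_eval (the repo may also be addressed under a namespace such as cardiffnlp/tweet_eval):

from datasets import load_dataset

# Resolve the "offensive" config; the split files come from the
# offensive/{train,test,validation}-* parquet paths declared in the README.
dataset = load_dataset("tweet_eval", "offensive")

print(dataset)                    # expected splits: train (11916), test (860), validation (1324)
print(dataset["train"].features)  # label: ClassLabel(names=['non-offensive', 'offensive'])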
dataset_infos.json CHANGED

@@ -231,27 +231,21 @@
     "features": {
       "text": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "label": {
-        "num_classes": 2,
         "names": [
           "non-offensive",
           "offensive"
         ],
-        "names_file": null,
-        "id": null,
         "_type": "ClassLabel"
       }
     },
-    "
-    "
-    "builder_name": "tweet_eval",
+    "builder_name": "tweet_eval",
+    "dataset_name": "tweet_eval",
     "config_name": "offensive",
     "version": {
       "version_str": "1.1.0",
-      "description": null,
       "major": 1,
       "minor": 1,
       "patch": 0

@@ -259,53 +253,26 @@
     "splits": {
       "train": {
         "name": "train",
-        "num_bytes":
+        "num_bytes": 1648061,
         "num_examples": 11916,
-        "dataset_name":
+        "dataset_name": null
       },
       "test": {
         "name": "test",
-        "num_bytes":
+        "num_bytes": 135473,
         "num_examples": 860,
-        "dataset_name":
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes":
+        "num_bytes": 192417,
         "num_examples": 1324,
-        "dataset_name":
+        "dataset_name": null
-      }
-    },
-    "download_checksums": {
-      "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/offensive/train_text.txt": {
-        "num_bytes": 1529074,
-        "checksum": "78a7a32e38b10af7d8970b008bf17f661c8d0a90dad145fa0fa6a944669650db"
-      },
-      "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/offensive/train_labels.txt": {
-        "num_bytes": 23832,
-        "checksum": "c0b7d6ebdaa4ebcf6fc557ef1e775d92eda160218a0e3b1dd48eb8234dc892a6"
-      },
-      "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/offensive/test_text.txt": {
-        "num_bytes": 126921,
-        "checksum": "25b08c3333c26190f1023961c4508ec9aab24d4722b1a3ea7a6040724c120547"
-      },
-      "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/offensive/test_labels.txt": {
-        "num_bytes": 1720,
-        "checksum": "41d05a7aa0b01f5dafab21b95adb4f979cb4226c046ff315702774d10dac1605"
-      },
-      "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/offensive/val_text.txt": {
-        "num_bytes": 179188,
-        "checksum": "816f36d180c35f15a5104838cb73856a0bef42043482fe738f3481b06242a55c"
-      },
-      "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/offensive/val_labels.txt": {
-        "num_bytes": 2648,
-        "checksum": "ed2deb776bd1c52fb8221fadd3360e32d9dfe46842d78053528126e46363a258"
       }
     },
-    "download_size":
-    "
-    "
-    "size_in_bytes": 3839350
+    "download_size": 1234528,
+    "dataset_size": 1975951,
+    "size_in_bytes": 3210479
   },
   "sentiment": {
     "description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
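The split and size fields recorded above can be cross-checked without downloading the data by inspecting the builder metadata. A sketch, assuming the same dataset id as before; the exact numbers depend on which revision of the repo is resolved:

from datasets import load_dataset_builder

# Read only the repository metadata; no data files are downloaded here.
builder = load_dataset_builder("tweet_eval", "offensive")
info = builder.info

for name, split in info.splits.items():
    print(name, split.num_examples, split.num_bytes)

print("download_size:", info.download_size)  # 1234528 at this commit
print("dataset_size:", info.dataset_size)    # 1975951 at this commit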
offensive/test-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15b87adaf1850d8e3e104be6240255813d48d776940be546ad353fee9fcfb099
+size 93731
offensive/train-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c2cb443cb5b956aa8578a6e18b2f9e50ce43217774e3cf171d53e16a1db81a3
+size 1019132
offensive/validation-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1161b4e6cc7cbdaa667d4fc131f9be0ea3db755616126329dda7eb521cd88c36
+size 121665
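The three blobs above are Git LFS pointer files; the actual parquet shards live in LFS storage and can also be read directly. A sketch with pandas, assuming a plain tweet_eval namespace in the resolve URL (the commit page does not show the full repo id, so adjust the path if the dataset lives under a namespace such as cardiffnlp/tweet_eval):

import pandas as pd

# Hypothetical resolve URL built from the commit hash shown above;
# the repo id is an assumption, not taken from this page.
url = (
    "https://huggingface.co/datasets/tweet_eval/resolve/9b98abe/"
    "offensive/test-00000-of-00001.parquet"
)

df = pd.read_parquet(url)       # needs pyarrow (or fastparquet) installed
print(len(df))                  # 860 rows expected for the test split
print(df.columns.tolist())      # expected columns: ['text', 'label']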