Tasks: Question Answering
Sub-tasks: multiple-choice-qa
Modalities: Text
Formats: parquet
Languages: English
Size: 1M - 10M
Add 'logical_fallacies' config data files
README.md CHANGED

```diff
@@ -1173,6 +1173,8 @@ dataset_info:
   features:
   - name: question
     dtype: string
+  - name: subject
+    dtype: string
   - name: choices
     sequence: string
   - name: answer
@@ -1185,19 +1187,19 @@ dataset_info:
            '3': D
   splits:
   - name: auxiliary_train
-    num_bytes:
+    num_bytes: 161000625
     num_examples: 99842
   - name: test
-    num_bytes:
+    num_bytes: 53528
     num_examples: 163
   - name: validation
-    num_bytes:
+    num_bytes: 5469
     num_examples: 18
   - name: dev
-    num_bytes:
+    num_bytes: 1666
     num_examples: 5
-  download_size:
-  dataset_size:
+  download_size: 47196007
+  dataset_size: 161061288
 - config_name: machine_learning
   features:
   - name: question
@@ -2149,6 +2151,16 @@ configs:
     path: jurisprudence/validation-*
   - split: dev
     path: jurisprudence/dev-*
+- config_name: logical_fallacies
+  data_files:
+  - split: auxiliary_train
+    path: logical_fallacies/auxiliary_train-*
+  - split: test
+    path: logical_fallacies/test-*
+  - split: validation
+    path: logical_fallacies/validation-*
+  - split: dev
+    path: logical_fallacies/dev-*
 ---
 
 # Dataset Card for MMLU
```
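To sanity-check the new config, it can be loaded by name with the `datasets` library. A minimal sketch, assuming the repository id is `cais/mmlu` (the repo id is not shown on this page):

```python
from datasets import load_dataset

# "cais/mmlu" is an assumed repo id; "logical_fallacies" is the config
# added by this commit.
ds = load_dataset("cais/mmlu", "logical_fallacies")

# Split sizes should match the YAML metadata above:
# auxiliary_train=99842, test=163, validation=18, dev=5.
print({name: split.num_rows for name, split in ds.items()})

# "answer" is a ClassLabel stored as an int; map it back to a letter.
print(ds["test"].features["answer"].int2str(3))  # -> "D", per the '3': D mapping
```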
dataset_infos.json CHANGED

```diff
@@ -2527,39 +2527,34 @@
     "features": {
       "question": {
         "dtype": "string",
-        "id": null,
+        "_type": "Value"
+      },
+      "subject": {
+        "dtype": "string",
         "_type": "Value"
       },
       "choices": {
         "feature": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         },
-        "length": -1,
-        "id": null,
         "_type": "Sequence"
       },
       "answer": {
-        "num_classes": 4,
         "names": [
           "A",
           "B",
           "C",
           "D"
         ],
-        "id": null,
         "_type": "ClassLabel"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "task_templates": null,
-    "builder_name": "mmlu",
+    "builder_name": "parquet",
+    "dataset_name": "mmlu",
     "config_name": "logical_fallacies",
     "version": {
       "version_str": "1.0.0",
-      "description": null,
       "major": 1,
       "minor": 0,
       "patch": 0
@@ -2567,39 +2562,32 @@
     "splits": {
       "auxiliary_train": {
         "name": "auxiliary_train",
-        "num_bytes":
+        "num_bytes": 161000625,
         "num_examples": 99842,
-        "dataset_name":
+        "dataset_name": null
       },
       "test": {
         "name": "test",
-        "num_bytes":
+        "num_bytes": 53528,
         "num_examples": 163,
-        "dataset_name":
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes":
+        "num_bytes": 5469,
         "num_examples": 18,
-        "dataset_name":
+        "dataset_name": null
       },
       "dev": {
         "name": "dev",
-        "num_bytes":
+        "num_bytes": 1666,
         "num_examples": 5,
-        "dataset_name":
-      }
-    },
-    "download_checksums": {
-      "data.tar": {
-        "num_bytes": 166184960,
-        "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b"
+        "dataset_name": null
       }
     },
-    "download_size":
-    "post_processing_size": null,
-    "dataset_size":
-    "size_in_bytes": 326842974
+    "download_size": 47196007,
+    "dataset_size": 161061288,
+    "size_in_bytes": 208257295
   },
   "machine_learning": {
     "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n",
```
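The split and size fields above can be checked without downloading the shards by reading the repo metadata through `load_dataset_builder`. A sketch under the same `cais/mmlu` repo-id assumption:

```python
from datasets import load_dataset_builder

# Resolves the config's metadata; the parquet shards themselves are
# not materialized by this call.
builder = load_dataset_builder("cais/mmlu", "logical_fallacies")
info = builder.info

print(info.builder_name)  # "parquet" after this commit
for name, split in info.splits.items():
    print(name, split.num_examples, split.num_bytes)
print(info.download_size, info.dataset_size)  # 47196007, 161061288
```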
logical_fallacies/auxiliary_train-00000-of-00001.parquet ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2782fc860f57d9345a9233ab04f494b0af5ae85b893a27853f7014b14a3bd07
+size 47163955
```
logical_fallacies/dev-00000-of-00001.parquet ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d99f6e2fc9e9efc9b5c53205a8d8bfb19621c4e68e1831374b875c491d3e33eb
+size 3620
```
logical_fallacies/test-00000-of-00001.parquet ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51071098e4867f370919e59e0cf1ec16b6befebc1c8e1af1532fae8c9514ec9c
+size 22548
```
logical_fallacies/validation-00000-of-00001.parquet ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eeb9a15a1d70f4eb3ed8ad970158de5aa0a1ce365fd448114159216239a07d73
+size 5884
```
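Each of the four added files is a git-lfs pointer, not the parquet data itself: `oid` is the SHA-256 of the actual file and `size` is its byte length. A sketch of verifying a locally downloaded shard against its pointer (the local path is hypothetical):

```python
import hashlib
import os

def sha256_of(path: str) -> str:
    """Hash the file in 1 MiB chunks so large shards need not fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# Hypothetical local copy of the test shard; expected values are taken
# from the pointer file above.
path = "logical_fallacies/test-00000-of-00001.parquet"
print(os.path.getsize(path) == 22548)
print(sha256_of(path) == "51071098e4867f370919e59e0cf1ec16b6befebc1c8e1af1532fae8c9514ec9c")
```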