Tasks: Question Answering
Sub-tasks: multiple-choice-qa
Modalities: Text
Formats: parquet
Languages: English
Size: 1M - 10M

Add 'jurisprudence' config data files
README.md (CHANGED)

@@ -1142,6 +1142,8 @@ dataset_info:
   features:
   - name: question
     dtype: string
+  - name: subject
+    dtype: string
   - name: choices
     sequence: string
   - name: answer
@@ -1154,19 +1156,19 @@ dataset_info:
           '3': D
   splits:
   - name: auxiliary_train
-    num_bytes:
+    num_bytes: 161000625
     num_examples: 99842
   - name: test
-    num_bytes:
+    num_bytes: 35810
     num_examples: 108
   - name: validation
-    num_bytes:
+    num_bytes: 3904
     num_examples: 11
   - name: dev
-    num_bytes:
+    num_bytes: 1376
     num_examples: 5
-  download_size:
-  dataset_size:
+  download_size: 47195740
+  dataset_size: 161041715
 - config_name: logical_fallacies
   features:
   - name: question
@@ -2137,6 +2139,16 @@ configs:
     path: international_law/validation-*
   - split: dev
     path: international_law/dev-*
+- config_name: jurisprudence
+  data_files:
+  - split: auxiliary_train
+    path: jurisprudence/auxiliary_train-*
+  - split: test
+    path: jurisprudence/test-*
+  - split: validation
+    path: jurisprudence/validation-*
+  - split: dev
+    path: jurisprudence/dev-*
 ---
 
 # Dataset Card for MMLU
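The new `configs` entry above is what lets the `datasets` library resolve the jurisprudence parquet files by split. A minimal loading sketch, assuming the repository id is `cais/mmlu` (the repo id itself is not shown in this commit view):

```python
from datasets import load_dataset

# "cais/mmlu" is an assumption; substitute the repository this commit belongs to.
ds = load_dataset("cais/mmlu", "jurisprudence")

# Splits declared in the README config: auxiliary_train, test, validation, dev.
print(ds)
print(ds["test"][0])  # {'question': ..., 'subject': ..., 'choices': [...], 'answer': ...}
```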
dataset_infos.json (CHANGED)

@@ -2457,39 +2457,34 @@
     "features": {
       "question": {
         "dtype": "string",
-        "
+        "_type": "Value"
+      },
+      "subject": {
+        "dtype": "string",
         "_type": "Value"
       },
       "choices": {
         "feature": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         },
-        "length": -1,
-        "id": null,
         "_type": "Sequence"
       },
       "answer": {
-        "num_classes": 4,
         "names": [
           "A",
           "B",
           "C",
           "D"
         ],
-        "id": null,
         "_type": "ClassLabel"
       }
     },
-    "
-    "
-    "task_templates": null,
-    "builder_name": "mmlu",
+    "builder_name": "parquet",
+    "dataset_name": "mmlu",
     "config_name": "jurisprudence",
     "version": {
       "version_str": "1.0.0",
-      "description": null,
       "major": 1,
       "minor": 0,
       "patch": 0
@@ -2497,39 +2492,32 @@
     "splits": {
       "auxiliary_train": {
         "name": "auxiliary_train",
-        "num_bytes":
+        "num_bytes": 161000625,
         "num_examples": 99842,
-        "dataset_name":
+        "dataset_name": null
       },
       "test": {
         "name": "test",
-        "num_bytes":
+        "num_bytes": 35810,
         "num_examples": 108,
-        "dataset_name":
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes":
+        "num_bytes": 3904,
         "num_examples": 11,
-        "dataset_name":
+        "dataset_name": null
       },
       "dev": {
         "name": "dev",
-        "num_bytes":
+        "num_bytes": 1376,
         "num_examples": 5,
-        "dataset_name":
-      }
-    },
-    "download_checksums": {
-      "data.tar": {
-        "num_bytes": 166184960,
-        "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b"
+        "dataset_name": null
       }
     },
-    "download_size":
-    "
-    "
-    "size_in_bytes": 326825199
+    "download_size": 47195740,
+    "dataset_size": 161041715,
+    "size_in_bytes": 208237455
   },
   "logical_fallacies": {
     "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n",
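In `dataset_infos.json` the `answer` feature is a `ClassLabel`, so stored examples carry integer indices that map to the names `A` through `D` (the `'0': A` ... `'3': D` mapping in the README). A small sketch of the declared feature schema and that mapping, using the public `datasets` feature types:

```python
from datasets import ClassLabel, Features, Sequence, Value

# Feature schema as declared for the jurisprudence config.
features = Features({
    "question": Value("string"),
    "subject": Value("string"),
    "choices": Sequence(Value("string")),
    "answer": ClassLabel(names=["A", "B", "C", "D"]),
})

# ClassLabel stores integers on disk; int2str recovers the letter label.
print(features["answer"].int2str(3))  # 'D'
```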
jurisprudence/auxiliary_train-00000-of-00001.parquet (ADDED)

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2782fc860f57d9345a9233ab04f494b0af5ae85b893a27853f7014b14a3bd07
+size 47163955

jurisprudence/dev-00000-of-00001.parquet (ADDED)

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a51dc98513e10c1e98bde78351eb32265742f2f7085dc6674bc36bb60491310
+size 3504

jurisprudence/test-00000-of-00001.parquet (ADDED)

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:edf4e12ef25a4ef0caa43e254cee665fa9341ec8e8fd4265c5a295cb2dd9ca2d
+size 22744

jurisprudence/validation-00000-of-00001.parquet (ADDED)

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc2d42f4de60752fb2a92bd985bb4259438c594f947ee6fda5afdc14294fa2f0
+size 5537
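Each added parquet file is committed as a Git LFS pointer; the blob itself is fetched separately and must match the recorded sha256 oid and byte size. A verification sketch using values from the dev pointer above (the local path is illustrative):

```python
import hashlib
from pathlib import Path

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a local file against the sha256 oid and size from its LFS pointer."""
    data = Path(path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# Pointer values copied from jurisprudence/dev-00000-of-00001.parquet above.
print(verify_lfs_object(
    "jurisprudence/dev-00000-of-00001.parquet",
    "9a51dc98513e10c1e98bde78351eb32265742f2f7085dc6674bc36bb60491310",
    3504,
))
```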