Commit 8663a53
Update files from the datasets library (from 1.0.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0
- .gitattributes +27 -0
- dataset_infos.json +1 -0
- dummy/plain_text/1.0.0/dummy_data.zip +3 -0
- multi_nli.py +135 -0
.gitattributes
ADDED
@@ -0,0 +1,27 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
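
Every pattern above hands matching files to Git LFS, so the repository stores a small pointer instead of the binary itself. As a rough illustration (not part of this commit), the following Python sketch approximates the matching with the standard-library fnmatch; real git attribute matching has extra rules (e.g. for "**"), so treat it as a sanity check only:

# Rough sketch only: approximate .gitattributes matching with fnmatch.
from fnmatch import fnmatch

LFS_PATTERNS = [
    "*.7z", "*.arrow", "*.bin", "*.bin.*", "*.bz2", "*.ftz", "*.gz", "*.h5",
    "*.joblib", "*.lfs.*", "*.model", "*.msgpack", "*.onnx", "*.ot",
    "*.parquet", "*.pb", "*.pt", "*.pth", "*.rar", "saved_model/**/*",
    "*.tar.*", "*.tflite", "*.tgz", "*.xz", "*.zip", "*.zstandard", "*tfevents*",
]

def is_lfs_tracked(path):
    # Try each pattern against the full path and the bare filename,
    # mirroring how bare patterns in .gitattributes match at any directory level.
    name = path.rsplit("/", 1)[-1]
    return any(fnmatch(path, pat) or fnmatch(name, pat) for pat in LFS_PATTERNS)

print(is_lfs_tracked("dummy/plain_text/1.0.0/dummy_data.zip"))  # True, via *.zip
print(is_lfs_tracked("multi_nli.py"))  # False: the loader script stays in git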
dataset_infos.json
ADDED
@@ -0,0 +1 @@
+{"plain_text": {"description": "The Multi-Genre Natural Language Inference (MultiNLI) corpus is a\ncrowd-sourced collection of 433k sentence pairs annotated with textual\nentailment information. The corpus is modeled on the SNLI corpus, but differs in\nthat it covers a range of genres of spoken and written text, and supports a\ndistinctive cross-genre generalization evaluation. The corpus served as the\nbasis for the shared task of the RepEval 2017 Workshop at EMNLP in Copenhagen.\n", "citation": "@InProceedings{N18-1101,\n  author = \"Williams, Adina\n            and Nangia, Nikita\n            and Bowman, Samuel\",\n  title = \"A Broad-Coverage Challenge Corpus for\n           Sentence Understanding through Inference\",\n  booktitle = \"Proceedings of the 2018 Conference of\n               the North American Chapter of the\n               Association for Computational Linguistics:\n               Human Language Technologies, Volume 1 (Long\n               Papers)\",\n  year = \"2018\",\n  publisher = \"Association for Computational Linguistics\",\n  pages = \"1112--1122\",\n  location = \"New Orleans, Louisiana\",\n  url = \"http://aclweb.org/anthology/N18-1101\"\n}\n", "homepage": "https://www.nyu.edu/projects/bowman/multinli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "supervised_keys": null, "builder_name": "multi_nli", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 73245222, "num_examples": 392702, "dataset_name": "multi_nli"}, "validation_matched": {"name": "validation_matched", "num_bytes": 1799439, "num_examples": 9815, "dataset_name": "multi_nli"}, "validation_mismatched": {"name": "validation_mismatched", "num_bytes": 1914827, "num_examples": 9832, "dataset_name": "multi_nli"}}, "download_checksums": {"http://storage.googleapis.com/tfds-data/downloads/multi_nli/multinli_1.0.zip": {"num_bytes": 226850426, "checksum": "049f507b9e36b1fcb756cfd5aeb3b7a0cfcb84bf023793652987f7e7e0957822"}}, "download_size": 226850426, "dataset_size": 76959488, "size_in_bytes": 303809914}}
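
The metadata above pins three splits (train, validation_matched, validation_mismatched), a three-way ClassLabel, and a roughly 227 MB download. A minimal usage sketch, assuming a datasets installation at version 1.0.0 or later (the load_dataset call and split names come from this commit; nothing else is added):

# Minimal usage sketch for the dataset this commit defines.
# Assumes `pip install datasets` (>= 1.0.0); first call downloads ~227 MB.
from datasets import load_dataset

mnli = load_dataset("multi_nli")  # dict-like object holding all three splits

print(mnli["train"].num_rows)               # 392702, matching dataset_infos.json
print(mnli["validation_matched"].features)  # premise, hypothesis, label
ex = mnli["train"][0]
print(ex["premise"], "=>", ex["label"])     # label is a class id in {0, 1, 2}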
dummy/plain_text/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fa7aca81ea7db5408b84d967c291a89f721e82e0b4eef3563e48ec8edf347e8
+size 1276
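
Git stores only this three-line LFS pointer for the dummy archive; the 1276-byte zip itself lives in LFS storage, addressed by its sha256. As an illustration of the pointer format (again not part of the commit), here is a sketch that verifies a locally downloaded copy against the recorded oid and size using only the standard library; the local path "dummy_data.zip" is a hypothetical download location:

# Sketch: check a downloaded file against the git-lfs pointer above.
# The oid and size are copied from the diff; "dummy_data.zip" is a
# hypothetical local path for the fetched object.
import hashlib
import os

EXPECTED_OID = "1fa7aca81ea7db5408b84d967c291a89f721e82e0b4eef3563e48ec8edf347e8"
EXPECTED_SIZE = 1276

def verify_lfs_object(path):
    # Cheap size check first, then a streaming sha256 of the contents.
    if os.path.getsize(path) != EXPECTED_SIZE:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            digest.update(chunk)
    return digest.hexdigest() == EXPECTED_OID

print(verify_lfs_object("dummy_data.zip"))  # True only for the real 1276-byte zip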
multi_nli.py
ADDED
@@ -0,0 +1,135 @@
+# coding=utf-8
+# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""The Multi-Genre NLI Corpus."""
+
+from __future__ import absolute_import, division, print_function
+
+import os
+
+import datasets
+
+
+_CITATION = """\
+@InProceedings{N18-1101,
+  author    = {Williams, Adina
+               and Nangia, Nikita
+               and Bowman, Samuel},
+  title     = {A Broad-Coverage Challenge Corpus for
+               Sentence Understanding through Inference},
+  booktitle = {Proceedings of the 2018 Conference of
+               the North American Chapter of the
+               Association for Computational Linguistics:
+               Human Language Technologies, Volume 1 (Long
+               Papers)},
+  year      = {2018},
+  publisher = {Association for Computational Linguistics},
+  pages     = {1112--1122},
+  location  = {New Orleans, Louisiana},
+  url       = {http://aclweb.org/anthology/N18-1101}
+}
+"""
+
+_DESCRIPTION = """\
+The Multi-Genre Natural Language Inference (MultiNLI) corpus is a
+crowd-sourced collection of 433k sentence pairs annotated with textual
+entailment information. The corpus is modeled on the SNLI corpus, but differs in
+that it covers a range of genres of spoken and written text, and supports a
+distinctive cross-genre generalization evaluation. The corpus served as the
+basis for the shared task of the RepEval 2017 Workshop at EMNLP in Copenhagen.
+"""
+
+
+class MultiNLIConfig(datasets.BuilderConfig):
+    """BuilderConfig for MultiNLI."""
+
+    def __init__(self, **kwargs):
+        """BuilderConfig for MultiNLI.
+
+        Args:
+          **kwargs: keyword arguments forwarded to super.
+        """
+        super(MultiNLIConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
+
+
+class MultiNli(datasets.GeneratorBasedBuilder):
+    """MultiNLI: The Multi-Genre Natural Language Inference Corpus. Version 1.0."""
+
+    BUILDER_CONFIGS = [
+        MultiNLIConfig(
+            name="plain_text",
+            description="Plain text",
+        ),
+    ]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "premise": datasets.Value("string"),
+                    "hypothesis": datasets.Value("string"),
+                    "label": datasets.features.ClassLabel(names=["entailment", "neutral", "contradiction"]),
+                }
+            ),
+            # No default supervised_keys (as we have to pass both premise
+            # and hypothesis as input).
+            supervised_keys=None,
+            homepage="https://www.nyu.edu/projects/bowman/multinli/",
+            citation=_CITATION,
+        )
+
+    def _vocab_text_gen(self, filepath):
+        for _, ex in self._generate_examples(filepath):
+            yield " ".join([ex["premise"], ex["hypothesis"]])
+
+    def _split_generators(self, dl_manager):
+
+        downloaded_dir = dl_manager.download_and_extract(
+            "http://storage.googleapis.com/tfds-data/downloads/multi_nli/multinli_1.0.zip"
+        )
+        mnli_path = os.path.join(downloaded_dir, "multinli_1.0")
+        train_path = os.path.join(mnli_path, "multinli_1.0_train.txt")
+        matched_validation_path = os.path.join(mnli_path, "multinli_1.0_dev_matched.txt")
+        mismatched_validation_path = os.path.join(mnli_path, "multinli_1.0_dev_mismatched.txt")
+
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
+            datasets.SplitGenerator(name="validation_matched", gen_kwargs={"filepath": matched_validation_path}),
+            datasets.SplitGenerator(name="validation_mismatched", gen_kwargs={"filepath": mismatched_validation_path}),
+        ]
+
+    def _generate_examples(self, filepath):
+        """Generate mnli examples.
+
+        Args:
+          filepath: a string
+
+        Yields:
+          dictionaries containing "premise", "hypothesis" and "label" strings
+        """
+        for idx, line in enumerate(open(filepath, "rb")):
+            if idx == 0:
+                continue  # skip header
+            line = line.strip().decode("utf-8")
+            split_line = line.split("\t")
+            # Examples not marked with a three out of five consensus are marked with
+            # "-" and should not be used in standard evaluations.
+            if split_line[0] == "-":
+                continue
+            # Works for both splits even though dev has some extra human labels.
+            # Column 0 is gold_label; columns 5 and 6 are sentence1 and sentence2.
+            yield idx, {"premise": split_line[5], "hypothesis": split_line[6], "label": split_line[0]}
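
To see what _generate_examples consumes, here is a standalone sketch (not part of the commit) that replays the same parsing logic over a synthetic two-row TSV in the multinli_1.0_train.txt column layout, where column 0 is gold_label and columns 5 and 6 are sentence1 and sentence2; the c1-c4 column names are placeholders invented for the demo:

# Standalone replay of the parsing loop from _generate_examples above, run on
# a synthetic file in the multinli_1.0_train.txt column layout
# (column 0 = gold_label, columns 5 and 6 = sentence1 and sentence2).
import tempfile

header = "\t".join(["gold_label", "c1", "c2", "c3", "c4", "sentence1", "sentence2"])
rows = [
    "\t".join(["entailment", "x", "x", "x", "x", "A dog runs.", "An animal moves."]),
    "\t".join(["-", "x", "x", "x", "x", "No consensus.", "Should be skipped."]),
]
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("\n".join([header] + rows) + "\n")
    path = f.name

for idx, line in enumerate(open(path, "rb")):
    if idx == 0:
        continue  # skip the header row, exactly as the builder does
    split_line = line.strip().decode("utf-8").split("\t")
    if split_line[0] == "-":
        continue  # drop rows without a 3-of-5 annotator consensus
    print(idx, {"premise": split_line[5], "hypothesis": split_line[6], "label": split_line[0]})
# -> 1 {'premise': 'A dog runs.', 'hypothesis': 'An animal moves.', 'label': 'entailment'}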