Datasets: coqa
Tasks: Question Answering
Modalities: Text
Formats: parquet
Sub-tasks: extractive-qa
Languages: English
Size: 1K - 10K
Tags: conversational-qa
Commit e5cf57e: Update files from the datasets library (from 1.0.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0
- .gitattributes +27 -0
- coqa.py +99 -0
- dataset_infos.json +1 -0
- dummy/1.0.0/dummy_data.zip +3 -0
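For orientation, a minimal usage sketch (not part of this commit): once this script is on the Hub, the dataset can be loaded through the datasets library, assuming the repository name "coqa" implied by the files below.

    from datasets import load_dataset

    # Downloads the raw CoQA JSON files and builds the train/validation splits.
    coqa = load_dataset("coqa")
    print(coqa["train"][0]["questions"][:2])  # first two questions of a story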
.gitattributes ADDED
@@ -0,0 +1,27 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
coqa.py ADDED
@@ -0,0 +1,99 @@
+"""TODO(coqa): Add a description here."""
+
+from __future__ import absolute_import, division, print_function
+
+import json
+
+import datasets
+
+
+# TODO(coqa): BibTeX citation
+_CITATION = """\
+@InProceedings{reddy2018coqa,
+    author = {Reddy, Siva and Chen, Danqi and Manning, Christopher D.},
+    title = {CoQA: A Conversational Question Answering Challenge},
+    journal = {arXiv},
+    year = {2018},
+
+}
+"""
+
+# TODO(coqa):
+_DESCRIPTION = """\
+CoQA: A Conversational Question Answering Challenge
+"""
+
+_TRAIN_DATA_URL = "https://datasets.stanford.edu/data/coqa/coqa-train-v1.0.json"
+_DEV_DATA_URL = "https://datasets.stanford.edu/data/coqa/coqa-dev-v1.0.json"
+
+
+class Coqa(datasets.GeneratorBasedBuilder):
+    """TODO(coqa): Short description of my dataset."""
+
+    # TODO(coqa): Set up version.
+    VERSION = datasets.Version("1.0.0")
+
+    def _info(self):
+        # TODO(coqa): Specifies the datasets.DatasetInfo object
+        return datasets.DatasetInfo(
+            # This is the description that will appear on the datasets page.
+            description=_DESCRIPTION,
+            # datasets.features.FeatureConnectors
+            features=datasets.Features(
+                {
+                    "source": datasets.Value("string"),
+                    "story": datasets.Value("string"),
+                    "questions": datasets.features.Sequence(datasets.Value("string")),
+                    "answers": datasets.features.Sequence(
+                        {
+                            "input_text": datasets.Value("string"),
+                            "answer_start": datasets.Value("int32"),
+                            "answer_end": datasets.Value("int32"),
+                        }
+                    ),
+                }
+            ),
+            # If there's a common (input, target) tuple from the features,
+            # specify them here. They'll be used if as_supervised=True in
+            # builder.as_dataset.
+            supervised_keys=None,
+            # Homepage of the dataset for documentation
+            homepage="https://stanfordnlp.github.io/coqa/",
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        # TODO(coqa): Downloads the data and defines the splits
+        # dl_manager is a datasets.download.DownloadManager that can be used to
+        # download and extract URLs
+        urls_to_download = {"train": _TRAIN_DATA_URL, "dev": _DEV_DATA_URL}
+        downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"], "split": "train"}
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"], "split": "validation"}
+            ),
+        ]
+
+    def _generate_examples(self, filepath, split):
+        """Yields examples."""
+        # TODO(coqa): Yields (key, example) tuples from the dataset
+        with open(filepath, encoding="utf-8") as f:
+            data = json.load(f)
+            for row in data["data"]:
+                questions = [question["input_text"] for question in row["questions"]]
+                story = row["story"]
+                source = row["source"]
+                answers_start = [answer["span_start"] for answer in row["answers"]]
+                answers_end = [answer["span_end"] for answer in row["answers"]]
+                answers = [answer["input_text"] for answer in row["answers"]]
+                yield row["id"], {
+                    "source": source,
+                    "story": story,
+                    "questions": questions,
+                    "answers": {"input_text": answers, "answer_start": answers_start, "answer_end": answers_end},
+                }
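For reference, a sketch of the record shape _generate_examples yields, matching the Features block above; the field names come from the script, while all values here are invented placeholders.

    # Hypothetical example record (placeholder values, one CoQA story).
    example = {
        "source": "wikipedia",
        "story": "Once upon a time ...",
        "questions": ["What is the story about?", "Who wrote it?"],
        "answers": {
            "input_text": ["a princess", "unknown"],
            "answer_start": [12, -1],
            "answer_end": [22, -1],
        },
    }
    # The three answer lists are parallel: one entry per question.
    assert len(example["answers"]["input_text"]) == len(example["questions"])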
dataset_infos.json ADDED
@@ -0,0 +1 @@
+{"default": {"description": "CoQA: A Conversational Question Answering Challenge\n", "citation": "@InProceedings{SivaAndAl:Coca,\n author = {Siva, Reddy and Danqi, Chen and Christopher D., Manning},\n title = {WikiQA: A Challenge Dataset for Open-Domain Question Answering},\n journal = { arXiv},\n year = {2018},\n\n}\n", "homepage": "https://stanfordnlp.github.io/coqa/", "license": "", "features": {"source": {"dtype": "string", "id": null, "_type": "Value"}, "story": {"dtype": "string", "id": null, "_type": "Value"}, "questions": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "answers": {"feature": {"input_text": {"dtype": "string", "id": null, "_type": "Value"}, "answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "answer_end": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "supervised_keys": null, "builder_name": "coqa", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 18014921, "num_examples": 7199, "dataset_name": "coqa"}, "validation": {"name": "validation", "num_bytes": 1227955, "num_examples": 500, "dataset_name": "coqa"}}, "download_checksums": {"https://nlp.stanford.edu/data/coqa/coqa-train-v1.0.json": {"num_bytes": 49001836, "checksum": "b0fdb2bc1bd38dd3ca2ce5fa2ac3e02c6288ac914f241ac409a655ffb6619fa6"}, "https://nlp.stanford.edu/data/coqa/coqa-dev-v1.0.json": {"num_bytes": 9090845, "checksum": "dfa367a9733ce53222918d0231d9b3bedc2b8ee831a2845f62dfc70701f2540a"}}, "download_size": 58092681, "dataset_size": 19242876, "size_in_bytes": 77335557}}
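The split sizes recorded in this generated file can also be read back through the library instead of parsing the JSON by hand; a hedged sketch, assuming a datasets version that provides load_dataset_builder:

    from datasets import load_dataset_builder

    # Fetches the builder and its DatasetInfo without downloading the data.
    builder = load_dataset_builder("coqa")
    info = builder.info
    print(info.splits["train"].num_examples)       # 7199 per dataset_infos.json
    print(info.splits["validation"].num_examples)  # 500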
dummy/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11b3fd16d8a3d82f9871119edd0448eb5b8373f535df692a069c0a41d99da48e
+size 4224