# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""FeedbackQA: An Retrieval-based Question Answering Dataset with User Feedback""" | |
import json | |
import datasets | |
import os | |
logger = datasets.logging.get_logger(__name__) | |
_CITATION = """ | |
""" | |
_DESCRIPTION = """\ | |
FeedbackQA is a retrieval-based QA dataset \ | |
that contains interactive feedback from users. \ | |
It has two parts: the first part contains a conventional RQA dataset, \ | |
whilst this repo contains the second part, which contains feedback(ratings and natural language explanations) for QA pairs. | |
""" | |
# _URLS = {
#     "train": "https://cdn-lfs.huggingface.co/datasets/McGill-NLP/FeedbackQA/46bd763229fc603d73f634a312367acb83c3b713a5dfd9fcf8a9b3e310c39a67",
#     "dev": "https://cdn-lfs.huggingface.co/datasets/McGill-NLP/FeedbackQA/40a93282e5fdee4706c20e32ddd4734151139d67f6844dbcffb9e7be22ae6b8f",
#     "test": "https://cdn-lfs.huggingface.co/datasets/McGill-NLP/FeedbackQA/50c4a21dc778cf064f731161e2213f21d2951cabd9331a1c524f791055040d02",
# }

_URL = 'https://drive.google.com/uc?export=download&id=14KV6yKgdjzb6fbFzshGuNvEp9zGv_gol'
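
# Note: the downloaded Google Drive archive is expected to unpack into the three
# split files consumed by `_split_generators` below: feedback_train.json,
# feedback_valid.json and feedback_test.json.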

class FeedbackConfig(datasets.BuilderConfig):
    """BuilderConfig for FeedbackQA."""

    def __init__(self, **kwargs):
        """BuilderConfig for FeedbackQA.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(FeedbackConfig, self).__init__(**kwargs)


class FeedbackQA(datasets.GeneratorBasedBuilder):
    """FeedbackQA: retrieval-based QA dataset that contains interactive feedback from users."""

    BUILDER_CONFIGS = [
        FeedbackConfig(
            name="plain_text",
            version=datasets.Version("1.0.0", ""),
            description="Plain text",
        ),
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # "id": datasets.Value("string"),
                    # "title": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                    "feedback": datasets.features.Sequence(
                        {
                            "rating": datasets.Value("string"),
                            "explanation": datasets.Value("string"),
                        }
                    ),
                }
            ),
            # No default supervised_keys (as we have to pass both question
            # and answer as input).
            supervised_keys=None,
            homepage="https://mcgill-nlp.github.io/feedbackQA_data/",
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        downloaded_files_path = dl_manager.download_and_extract(_URL)
        train_file = os.path.join(downloaded_files_path, 'feedback_train.json')
        val_file = os.path.join(downloaded_files_path, 'feedback_valid.json')
        test_file = os.path.join(downloaded_files_path, 'feedback_test.json')
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_file}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_file}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_file}),
        ]
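
    # The raw JSON for each split is a list of records shaped roughly as follows
    # (inferred from the parsing logic in `_generate_examples` below; fields not
    # accessed there are not guaranteed):
    #   {
    #       "question": "...",
    #       "passage": {
    #           "reference": {
    #               "page_title": "...",
    #               "section_headers": ["..."],
    #               "section_content": "..."
    #           }
    #       },
    #       "rating": [...],      # one rating string per feedback annotation
    #       "feedback": [...]     # natural language explanations, aligned with "rating"
    #   }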
    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        key = 0
        with open(filepath, encoding="utf-8") as f:
            fbqa = json.load(f)
            for dict_item in fbqa:
                question = dict_item['question']
                # Concatenate the page title, section headers and section content
                # of the reference passage into a single answer string.
                passage_text = ''
                if dict_item['passage']['reference']['page_title']:
                    passage_text += dict_item['passage']['reference']['page_title'] + '\n'
                if dict_item['passage']['reference']['section_headers']:
                    passage_text += '\n'.join(dict_item['passage']['reference']['section_headers']) + '\n'
                if dict_item['passage']['reference']['section_content']:
                    passage_text += dict_item['passage']['reference']['section_content']
                yield key, {
                    "question": question,
                    "answer": passage_text,
                    "feedback": {
                        "rating": dict_item['rating'],
                        "explanation": dict_item['feedback'],
                    },
                }
                key += 1
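

# A minimal usage sketch, assuming the dataset is published on the Hugging Face Hub
# as "McGill-NLP/FeedbackQA" (repository name taken from the commented URLs above)
# and a `datasets` version that still executes dataset loading scripts is installed.
if __name__ == "__main__":
    feedback_qa = datasets.load_dataset("McGill-NLP/FeedbackQA", "plain_text")
    sample = feedback_qa["train"][0]
    print(sample["question"])
    print(sample["answer"][:200])
    # "feedback" is a Sequence feature, so ratings and explanations come back as
    # parallel lists for each QA pair.
    print(sample["feedback"]["rating"])
    print(sample["feedback"]["explanation"])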