Convert dataset to Parquet
#4 by SaylorTwift (HF Staff) - opened

README.md CHANGED
@@ -18,7 +18,40 @@ task_categories:
 - question-answering
 task_ids:
 - extractive-qa
-# paperswithcode_id: faquad
+configs:
+- config_name: plain_text
+  data_files:
+  - split: train
+    path: plain_text/train-*
+  - split: validation
+    path: plain_text/validation-*
+  default: true
+dataset_info:
+  config_name: plain_text
+  features:
+  - name: id
+    dtype: string
+  - name: title
+    dtype: string
+  - name: context
+    dtype: string
+  - name: question
+    dtype: string
+  - name: answers
+    sequence:
+    - name: text
+      dtype: string
+    - name: answer_start
+      dtype: int32
+  splits:
+  - name: train
+    num_bytes: 975190
+    num_examples: 837
+  - name: validation
+    num_bytes: 90441
+    num_examples: 63
+  download_size: 236008
+  dataset_size: 1065631
 train-eval-index:
 - config: plain_text
   task: question-answering
@@ -33,8 +66,8 @@ train-eval-index:
       text: text
       answer_start: answer_start
   metrics:
-  - type: squad
-    name: SQuAD
+  - type: squad
+    name: SQuAD
 ---
 
 # Dataset Card for FaQuAD
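With the `configs` and `dataset_info` blocks above, the Hub serves the Parquet shards directly and `datasets` no longer needs to run a loading script. A minimal sketch of loading after this change; the bare hub id `"faquad"` is an assumption, substitute the repository id this PR targets:

```python
# Minimal sketch, assuming the dataset repo id is "faquad" (adjust as needed).
# With `configs`/`data_files` in the card metadata, load_dataset resolves the
# Parquet shards directly; no Python loading script is executed.
from datasets import load_dataset

ds = load_dataset("faquad", "plain_text")
print(ds["train"].num_rows)       # 837, per the `splits` metadata above
print(ds["validation"].num_rows)  # 63
print(ds["train"][0]["question"])
```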
faquad.py DELETED
@@ -1,149 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Adapted from the SQuAD script.
-#
-
-# Lint as: python3
-"""FaQuAD: Reading Comprehension Dataset in the Domain of Brazilian Higher Education."""
-
-
-import json
-
-import datasets
-
-
-logger = datasets.logging.get_logger(__name__)
-
-
-_CITATION = """\
-@INPROCEEDINGS{
-  8923668,
-  author={Sayama, Hélio Fonseca and Araujo, Anderson Viçoso and Fernandes, Eraldo Rezende},
-  booktitle={2019 8th Brazilian Conference on Intelligent Systems (BRACIS)},
-  title={FaQuAD: Reading Comprehension Dataset in the Domain of Brazilian Higher Education},
-  year={2019},
-  volume={},
-  number={},
-  pages={443-448},
-  doi={10.1109/BRACIS.2019.00084}
-}
-"""
-
-_DESCRIPTION = """\
-Academic secretaries and faculty members of higher education institutions face a common problem:
-the abundance of questions sent by academics
-whose answers are found in available institutional documents.
-The official documents produced by Brazilian public universities are vast and disperse,
-which discourage students to further search for answers in such sources.
-In order to lessen this problem, we present FaQuAD:
-a novel machine reading comprehension dataset
-in the domain of Brazilian higher education institutions.
-FaQuAD follows the format of SQuAD (Stanford Question Answering Dataset) [Rajpurkar et al. 2016].
-It comprises 900 questions about 249 reading passages (paragraphs),
-which were taken from 18 official documents of a computer science college
-from a Brazilian federal university
-and 21 Wikipedia articles related to Brazilian higher education system.
-As far as we know, this is the first Portuguese reading comprehension dataset in this format.
-"""
-
-_URL = "https://raw.githubusercontent.com/liafacom/faquad/master/data/"
-_URLS = {
-    "train": _URL + "train.json",
-    "dev": _URL + "dev.json",
-}
-
-
-class FaquadConfig(datasets.BuilderConfig):
-    """BuilderConfig for FaQuAD."""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for FaQuAD.
-
-        Args:
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(FaquadConfig, self).__init__(**kwargs)
-
-
-class Faquad(datasets.GeneratorBasedBuilder):
-    """FaQuAD: Reading Comprehension Dataset in the Domain of Brazilian Higher Education. Version 1.0."""
-
-    BUILDER_CONFIGS = [
-        FaquadConfig(
-            name="plain_text",
-            version=datasets.Version("1.0.0", ""),
-            description="Plain text",
-        ),
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "title": datasets.Value("string"),
-                    "context": datasets.Value("string"),
-                    "question": datasets.Value("string"),
-                    "answers": datasets.features.Sequence(
-                        {
-                            "text": datasets.Value("string"),
-                            "answer_start": datasets.Value("int32"),
-                        }
-                    ),
-                }
-            ),
-            # No default supervised_keys (as we have to pass both question
-            # and context as input).
-            supervised_keys=None,
-            homepage="https://github.com/liafacom/faquad",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        downloaded_files = dl_manager.download_and_extract(_URLS)
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-        ]
-
-    def _generate_examples(self, filepath):
-        """This function returns the examples in the raw (text) form."""
-        logger.info("generating examples from = %s", filepath)
-        key = 0
-        with open(filepath, encoding="utf-8") as f:
-            faquad = json.load(f)
-            for article in faquad["data"]:
-                title = article.get("title", "")
-                for paragraph in article["paragraphs"]:
-                    context = paragraph["context"]  # do not strip leading blank spaces GH-2585
-                    for qa in paragraph["qas"]:
-                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
-                        answers = [answer["text"] for answer in qa["answers"]]
-                        # Features currently used are "context", "question", and "answers".
-                        # Others are extracted here for the ease of future expansions.
-                        yield key, {
-                            "title": title,
-                            "context": context,
-                            "question": qa["question"],
-                            "id": qa["id"],
-                            "answers": {
-                                "answer_start": answer_starts,
-                                "text": answers,
-                            },
-                        }
-                        key += 1
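The deleted script above downloaded the SQuAD-style JSON files and flattened them into one example per question; the Parquet shards added below hold that same flattened table. A rough sketch of inspecting a shard directly, assuming it has been downloaded locally first (the path here is illustrative):

```python
# Rough sketch: inspect a downloaded Parquet shard with pyarrow. The local path
# is an assumption -- fetch the real file first (the repo copy is an LFS object).
import pyarrow.parquet as pq

table = pq.read_table("plain_text/train-00000-of-00001.parquet")
print(table.schema)    # should mirror the `features` block in the README
print(table.num_rows)  # expected 837, matching `num_examples` above
```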
plain_text/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c32edfb7af6dfa63d66668a8fa47433a7a107280ed1b98243913993e54d98d9f
+size 198112
plain_text/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c552fef5dc7f55b65c5c5e551500a13c3df27f55cfebd5c57afe37e9a7a3e7bc
+size 37896
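Note that the two `.parquet` entries above are Git LFS pointer files, not the Parquet bytes themselves: each pointer records only the sha256 oid and byte size of the real object. A small sketch of verifying a downloaded shard against its pointer (the local filename is illustrative):

```python
# Small sketch: check a downloaded train shard against the LFS pointer above.
# The local filename is an assumption.
import hashlib
from pathlib import Path

data = Path("train-00000-of-00001.parquet").read_bytes()
assert len(data) == 198112  # `size` from the pointer
assert hashlib.sha256(data).hexdigest() == (
    "c32edfb7af6dfa63d66668a8fa47433a7a107280ed1b98243913993e54d98d9f"
)  # `oid sha256:` from the pointer
```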