Convert dataset to Parquet
#4
by lhoestq (HF Staff)
- README.md +11 -4
- conll2000.py +0 -187
- data/test-00000-of-00001.parquet +3 -0
- data/train-00000-of-00001.parquet +3 -0
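This PR replaces the `conll2000.py` loading script with pre-converted Parquet shards and updates the dataset card with split sizes and a `configs` mapping, so the dataset resolves without executing any code. A minimal sketch of consuming the converted dataset (assuming the `datasets` library is installed and the repo is published under the id `conll2000`):

```python
from datasets import load_dataset

# After this PR, load_dataset reads the Parquet shards directly;
# no dataset script is downloaded or executed.
ds = load_dataset("conll2000")
print(ds["train"][0]["tokens"])            # token list of the first example
print(ds["train"].features["chunk_tags"])  # ClassLabel names survive via the card's dataset_info
```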
README.md
CHANGED
@@ -86,13 +86,20 @@ dataset_info:
     '22': I-VP
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 5356945
     num_examples: 8937
   - name: test
-    num_bytes:
+    num_bytes: 1201131
     num_examples: 2013
-  download_size:
-  dataset_size:
+  download_size: 1300122
+  dataset_size: 6558076
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: test
+    path: data/test-*
 ---
 
 # Dataset Card for "conll2000"
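The added `configs` block is what lets the Hub viewer and `load_dataset` find the data without a script: each split name maps to a glob over `data/`. An equivalent explicit load, as a sketch assuming a local clone of the repo:

```python
from datasets import load_dataset

# Mirrors the README's configs/data_files mapping by pointing the
# generic parquet builder at the same globs.
ds = load_dataset(
    "parquet",
    data_files={"train": "data/train-*", "test": "data/test-*"},
)
print(ds)  # expected: train with 8937 examples, test with 2013
```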
conll2000.py
DELETED
@@ -1,187 +0,0 @@
-# coding=utf-8
-# Copyright 2020 HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""Introduction to the CoNLL-2000 Shared Task: Chunking"""
-
-import datasets
-
-
-logger = datasets.logging.get_logger(__name__)
-
-
-_CITATION = """\
-@inproceedings{tksbuchholz2000conll,
-   author    = "Tjong Kim Sang, Erik F. and Sabine Buchholz",
-   title     = "Introduction to the CoNLL-2000 Shared Task: Chunking",
-   editor    = "Claire Cardie and Walter Daelemans and Claire
-                Nedellec and Tjong Kim Sang, Erik",
-   booktitle = "Proceedings of CoNLL-2000 and LLL-2000",
-   publisher = "Lisbon, Portugal",
-   pages     = "127--132",
-   year      = "2000"
-}
-"""
-
-_DESCRIPTION = """\
-Text chunking consists of dividing a text in syntactically correlated parts of words. For example, the sentence
-He reckons the current account deficit will narrow to only # 1.8 billion in September . can be divided as follows:
-[NP He ] [VP reckons ] [NP the current account deficit ] [VP will narrow ] [PP to ] [NP only # 1.8 billion ]
-[PP in ] [NP September ] .
-
-Text chunking is an intermediate step towards full parsing. It was the shared task for CoNLL-2000. Training and test
-data for this task is available. This data consists of the same partitions of the Wall Street Journal corpus (WSJ)
-as the widely used data for noun phrase chunking: sections 15-18 as training data (211727 tokens) and section 20 as
-test data (47377 tokens). The annotation of the data has been derived from the WSJ corpus by a program written by
-Sabine Buchholz from Tilburg University, The Netherlands.
-"""
-
-_URL = "https://github.com/teropa/nlp/raw/master/resources/corpora/conll2000/"
-_TRAINING_FILE = "train.txt"
-_TEST_FILE = "test.txt"
-
-
-class Conll2000(datasets.GeneratorBasedBuilder):
-    """Conll2000 dataset."""
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "tokens": datasets.Sequence(datasets.Value("string")),
-                    "pos_tags": datasets.Sequence(
-                        datasets.features.ClassLabel(
-                            names=["''", "#", "$", "(", ")", ",", ".", ":", "``", "CC", "CD",
-                                   "DT", "EX", "FW", "IN", "JJ", "JJR", "JJS", "MD", "NN",
-                                   "NNP", "NNPS", "NNS", "PDT", "POS", "PRP", "PRP$", "RB",
-                                   "RBR", "RBS", "RP", "SYM", "TO", "UH", "VB", "VBD", "VBG",
-                                   "VBN", "VBP", "VBZ", "WDT", "WP", "WP$", "WRB"]
-                        )
-                    ),
-                    "chunk_tags": datasets.Sequence(
-                        datasets.features.ClassLabel(
-                            names=["O", "B-ADJP", "I-ADJP", "B-ADVP", "I-ADVP", "B-CONJP",
-                                   "I-CONJP", "B-INTJ", "I-INTJ", "B-LST", "I-LST", "B-NP",
-                                   "I-NP", "B-PP", "I-PP", "B-PRT", "I-PRT", "B-SBAR",
-                                   "I-SBAR", "B-UCP", "I-UCP", "B-VP", "I-VP"]
-                        )
-                    ),
-                }
-            ),
-            supervised_keys=None,
-            homepage="https://www.clips.uantwerpen.be/conll2000/chunking/",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        urls_to_download = {
-            "train": f"{_URL}{_TRAINING_FILE}",
-            "test": f"{_URL}{_TEST_FILE}",
-        }
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
-        ]
-
-    def _generate_examples(self, filepath):
-        logger.info("⏳ Generating examples from = %s", filepath)
-        with open(filepath, encoding="utf-8") as f:
-            guid = 0
-            tokens = []
-            pos_tags = []
-            chunk_tags = []
-            for line in f:
-                if line == "" or line == "\n":
-                    if tokens:
-                        yield guid, {"id": str(guid), "tokens": tokens, "pos_tags": pos_tags, "chunk_tags": chunk_tags}
-                        guid += 1
-                        tokens = []
-                        pos_tags = []
-                        chunk_tags = []
-                else:
-                    # conll2000 tokens are space separated
-                    splits = line.split(" ")
-                    tokens.append(splits[0])
-                    pos_tags.append(splits[1])
-                    chunk_tags.append(splits[2].rstrip())
-            # last example
-            yield guid, {"id": str(guid), "tokens": tokens, "pos_tags": pos_tags, "chunk_tags": chunk_tags}
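The deletion above removes the download-and-parse path entirely. For reference, a sketch of roughly what the conversion does, assuming local copies of the original space-separated `train.txt`/`test.txt` files; the actual conversion preserves the ClassLabel integer encoding declared in the README's `dataset_info`, while this sketch keeps the raw tag strings for brevity:

```python
import pyarrow as pa
import pyarrow.parquet as pq

def read_conll(path):
    """Parse the space-separated CoNLL-2000 format the deleted script handled."""
    examples, tokens, pos, chunk = [], [], [], []
    with open(path, encoding="utf-8") as f:
        for line in f:
            if line.strip() == "":
                if tokens:  # sentence boundary: flush the accumulated example
                    examples.append({"id": str(len(examples)), "tokens": tokens,
                                     "pos_tags": pos, "chunk_tags": chunk})
                    tokens, pos, chunk = [], [], []
            else:
                tok, p, c = line.split(" ")
                tokens.append(tok)
                pos.append(p)
                chunk.append(c.rstrip())
    if tokens:  # last example, as in the deleted script
        examples.append({"id": str(len(examples)), "tokens": tokens,
                         "pos_tags": pos, "chunk_tags": chunk})
    return examples

# Write one shard per split, matching the file names added by this PR.
for split in ("train", "test"):
    table = pa.Table.from_pylist(read_conll(f"{split}.txt"))
    pq.write_table(table, f"data/{split}-00000-of-00001.parquet")
```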
data/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68bc0af3f527709c1d48193c40068bba88a30b76d0ec0db00a9b1120a9a2c445
+size 244073
data/train-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee4e62ab0ba721f4b70dca6505a00f60349145daf99900b98d510c4dafe93100
+size 1056049
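The two added files are Git LFS pointers; the actual Parquet payloads live in LFS storage and resolve on an LFS-enabled clone. A quick way to sanity-check a shard afterwards, as a sketch assuming `pyarrow` is installed:

```python
import pyarrow.parquet as pq

# Read the test shard and confirm it matches the README split metadata.
table = pq.read_table("data/test-00000-of-00001.parquet")
print(table.schema)    # id, tokens, pos_tags, chunk_tags
print(table.num_rows)  # expected: 2013
```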