lhoestq (HF Staff) committed
Commit 3316e7f · verified · 1 Parent(s): 61eba14

Delete loading script

Files changed (1)
  1. conll2002.py +0 -228
conll2002.py DELETED
@@ -1,228 +0,0 @@
- # coding=utf-8
- # Copyright 2020 HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """Introduction to the CoNLL-2002 Shared Task: Language-Independent Named Entity Recognition"""
-
- import datasets
-
-
- logger = datasets.logging.get_logger(__name__)
-
-
- _CITATION = """\
- @inproceedings{tjong-kim-sang-2002-introduction,
-     title = "Introduction to the {C}o{NLL}-2002 Shared Task: Language-Independent Named Entity Recognition",
-     author = "Tjong Kim Sang, Erik F.",
-     booktitle = "{COLING}-02: The 6th Conference on Natural Language Learning 2002 ({C}o{NLL}-2002)",
-     year = "2002",
-     url = "https://www.aclweb.org/anthology/W02-2024",
- }
- """
-
- _DESCRIPTION = """\
- Named entities are phrases that contain the names of persons, organizations, locations, times and quantities.
-
- Example:
- [PER Wolff] , currently a journalist in [LOC Argentina] , played with [PER Del Bosque] in the final years of the seventies in [ORG Real Madrid] .
-
- The shared task of CoNLL-2002 concerns language-independent named entity recognition.
- We will concentrate on four types of named entities: persons, locations, organizations and names of miscellaneous entities that do not belong to the previous three groups.
- The participants of the shared task will be offered training and test data for at least two languages.
- They will use the data for developing a named-entity recognition system that includes a machine learning component.
- Information sources other than the training data may be used in this shared task.
- We are especially interested in methods that can use additional unannotated data for improving their performance (for example co-training).
-
- The train/validation/test sets are available in Spanish and Dutch.
-
- For more details see https://www.clips.uantwerpen.be/conll2002/ner/ and https://www.aclweb.org/anthology/W02-2024/
- """
-
- _URL = "https://raw.githubusercontent.com/teropa/nlp/master/resources/corpora/conll2002/"
- _ES_TRAINING_FILE = "esp.train"
- _ES_DEV_FILE = "esp.testa"
- _ES_TEST_FILE = "esp.testb"
- _NL_TRAINING_FILE = "ned.train"
- _NL_DEV_FILE = "ned.testa"
- _NL_TEST_FILE = "ned.testb"
-
-
- class Conll2002Config(datasets.BuilderConfig):
-     """BuilderConfig for Conll2002"""
-
-     def __init__(self, **kwargs):
- """BuilderConfig forConll2002.
67
-
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(Conll2002Config, self).__init__(**kwargs)
-
-
- class Conll2002(datasets.GeneratorBasedBuilder):
-     """Conll2002 dataset."""
-
-     BUILDER_CONFIGS = [
-         Conll2002Config(name="es", version=datasets.Version("1.0.0"), description="Conll2002 Spanish dataset"),
-         Conll2002Config(name="nl", version=datasets.Version("1.0.0"), description="Conll2002 Dutch dataset"),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "tokens": datasets.Sequence(datasets.Value("string")),
-                     "pos_tags": datasets.Sequence(
-                         datasets.features.ClassLabel(
-                             names=[
-                                 "AO",
-                                 "AQ",
-                                 "CC",
-                                 "CS",
-                                 "DA",
-                                 "DE",
-                                 "DD",
-                                 "DI",
-                                 "DN",
-                                 "DP",
-                                 "DT",
-                                 "Faa",
-                                 "Fat",
-                                 "Fc",
-                                 "Fd",
-                                 "Fe",
-                                 "Fg",
-                                 "Fh",
-                                 "Fia",
-                                 "Fit",
-                                 "Fp",
-                                 "Fpa",
-                                 "Fpt",
-                                 "Fs",
-                                 "Ft",
-                                 "Fx",
-                                 "Fz",
-                                 "I",
-                                 "NC",
-                                 "NP",
-                                 "P0",
-                                 "PD",
-                                 "PI",
-                                 "PN",
-                                 "PP",
-                                 "PR",
-                                 "PT",
-                                 "PX",
-                                 "RG",
-                                 "RN",
-                                 "SP",
-                                 "VAI",
-                                 "VAM",
-                                 "VAN",
-                                 "VAP",
-                                 "VAS",
-                                 "VMG",
-                                 "VMI",
-                                 "VMM",
-                                 "VMN",
-                                 "VMP",
-                                 "VMS",
-                                 "VSG",
-                                 "VSI",
-                                 "VSM",
-                                 "VSN",
-                                 "VSP",
-                                 "VSS",
-                                 "Y",
-                                 "Z",
-                             ]
-                         )
-                         if self.config.name == "es"
-                         else datasets.features.ClassLabel(
-                             names=["Adj", "Adv", "Art", "Conj", "Int", "Misc", "N", "Num", "Prep", "Pron", "Punc", "V"]
-                         )
-                     ),
-                     "ner_tags": datasets.Sequence(
-                         datasets.features.ClassLabel(
-                             names=[
-                                 "O",
-                                 "B-PER",
-                                 "I-PER",
-                                 "B-ORG",
-                                 "I-ORG",
-                                 "B-LOC",
-                                 "I-LOC",
-                                 "B-MISC",
-                                 "I-MISC",
-                             ]
-                         )
-                     ),
-                 }
-             ),
-             supervised_keys=None,
-             homepage="https://www.aclweb.org/anthology/W02-2024/",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         urls_to_download = {
-             "train": f"{_URL}{_ES_TRAINING_FILE if self.config.name == 'es' else _NL_TRAINING_FILE}",
-             "dev": f"{_URL}{_ES_DEV_FILE if self.config.name == 'es' else _NL_DEV_FILE}",
-             "test": f"{_URL}{_ES_TEST_FILE if self.config.name == 'es' else _NL_TEST_FILE}",
-         }
-         downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
-         ]
-
-     def _generate_examples(self, filepath):
-         logger.info("⏳ Generating examples from = %s", filepath)
-         with open(filepath, encoding="utf-8") as f:
-             guid = 0
-             tokens = []
-             pos_tags = []
-             ner_tags = []
-             for line in f:
-                 if line.startswith("-DOCSTART-") or line == "" or line == "\n":
-                     if tokens:
-                         yield guid, {
-                             "id": str(guid),
-                             "tokens": tokens,
-                             "pos_tags": pos_tags,
-                             "ner_tags": ner_tags,
-                         }
-                         guid += 1
-                         tokens = []
-                         pos_tags = []
-                         ner_tags = []
-                 else:
-                     # conll2002 tokens are space separated
-                     splits = line.split(" ")
-                     tokens.append(splits[0])
-                     pos_tags.append(splits[1])
-                     ner_tags.append(splits[2].rstrip())
-             # last example
-             yield guid, {
-                 "id": str(guid),
-                 "tokens": tokens,
-                 "pos_tags": pos_tags,
-                 "ner_tags": ner_tags,
-             }
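
With the loading script removed, the dataset is served directly from data files on the Hub. A minimal usage sketch, assuming the repo keeps the same "es"/"nl" configs and the tokens/pos_tags/ner_tags features the deleted builder defined:

from datasets import load_dataset

# Load the Spanish config straight from the Hub; no loading script needed.
ds = load_dataset("conll2002", "es")

example = ds["train"][0]
print(example["tokens"])

# ClassLabel features map the integer tags back to their string names.
ner_feature = ds["train"].features["ner_tags"].feature
print(ner_feature.int2str(example["ner_tags"]))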