lhoestq (HF Staff) committed
Commit 67d0a09 · verified · 1 Parent(s): b34682c

Delete loading script

Files changed (1)
  1. conllpp.py +0 -228
conllpp.py DELETED
@@ -1,228 +0,0 @@
- # coding=utf-8
- # Copyright 2020 HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """CrossWeigh: Training Named Entity Tagger from Imperfect Annotations"""
-
- import logging
-
- import datasets
-
-
- _CITATION = """\
- @inproceedings{wang2019crossweigh,
-     title={CrossWeigh: Training Named Entity Tagger from Imperfect Annotations},
-     author={Wang, Zihan and Shang, Jingbo and Liu, Liyuan and Lu, Lihao and Liu, Jiacheng and Han, Jiawei},
-     booktitle={Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)},
-     pages={5157--5166},
-     year={2019}
- }
- """
-
- _DESCRIPTION = """\
- CoNLLpp is a corrected version of the CoNLL2003 NER dataset where labels of 5.38% of the sentences in the test set
- have been manually corrected. The training set and development set are included for completeness.
- For more details see https://www.aclweb.org/anthology/D19-1519/ and https://github.com/ZihanWangKi/CrossWeigh
- """
-
- _URL = "https://github.com/ZihanWangKi/CrossWeigh/raw/master/data/"
- _TRAINING_FILE = "conllpp_train.txt"
- _DEV_FILE = "conllpp_dev.txt"
- _TEST_FILE = "conllpp_test.txt"
-
-
- class ConllppConfig(datasets.BuilderConfig):
-     """BuilderConfig for CoNLLpp."""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for CoNLLpp.
-         Args:
-           **kwargs: keyword arguments forwarded to super.
-         """
-         super(ConllppConfig, self).__init__(**kwargs)
-
-
- class Conllpp(datasets.GeneratorBasedBuilder):
-     """Conllpp dataset."""
-
-     BUILDER_CONFIGS = [
-         ConllppConfig(name="conllpp", version=datasets.Version("1.0.0"), description="Conllpp dataset"),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "tokens": datasets.Sequence(datasets.Value("string")),
-                     "pos_tags": datasets.Sequence(
-                         datasets.features.ClassLabel(
-                             names=[
-                                 '"',
-                                 "''",
-                                 "#",
-                                 "$",
-                                 "(",
-                                 ")",
-                                 ",",
-                                 ".",
-                                 ":",
-                                 "``",
-                                 "CC",
-                                 "CD",
-                                 "DT",
-                                 "EX",
-                                 "FW",
-                                 "IN",
-                                 "JJ",
-                                 "JJR",
-                                 "JJS",
-                                 "LS",
-                                 "MD",
-                                 "NN",
-                                 "NNP",
-                                 "NNPS",
-                                 "NNS",
-                                 "NN|SYM",
-                                 "PDT",
-                                 "POS",
-                                 "PRP",
-                                 "PRP$",
-                                 "RB",
-                                 "RBR",
-                                 "RBS",
-                                 "RP",
-                                 "SYM",
-                                 "TO",
-                                 "UH",
-                                 "VB",
-                                 "VBD",
-                                 "VBG",
-                                 "VBN",
-                                 "VBP",
-                                 "VBZ",
-                                 "WDT",
-                                 "WP",
-                                 "WP$",
-                                 "WRB",
-                             ]
-                         )
-                     ),
-                     "chunk_tags": datasets.Sequence(
-                         datasets.features.ClassLabel(
-                             names=[
-                                 "O",
-                                 "B-ADJP",
-                                 "I-ADJP",
-                                 "B-ADVP",
-                                 "I-ADVP",
-                                 "B-CONJP",
-                                 "I-CONJP",
-                                 "B-INTJ",
-                                 "I-INTJ",
-                                 "B-LST",
-                                 "I-LST",
-                                 "B-NP",
-                                 "I-NP",
-                                 "B-PP",
-                                 "I-PP",
-                                 "B-PRT",
-                                 "I-PRT",
-                                 "B-SBAR",
-                                 "I-SBAR",
-                                 "B-UCP",
-                                 "I-UCP",
-                                 "B-VP",
-                                 "I-VP",
-                             ]
-                         )
-                     ),
-                     "ner_tags": datasets.Sequence(
-                         datasets.features.ClassLabel(
-                             names=[
-                                 "O",
-                                 "B-PER",
-                                 "I-PER",
-                                 "B-ORG",
-                                 "I-ORG",
-                                 "B-LOC",
-                                 "I-LOC",
-                                 "B-MISC",
-                                 "I-MISC",
-                             ]
-                         )
-                     ),
-                 }
-             ),
-             supervised_keys=None,
-             homepage="https://github.com/ZihanWangKi/CrossWeigh",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         urls_to_download = {
-             "train": f"{_URL}{_TRAINING_FILE}",
-             "dev": f"{_URL}{_DEV_FILE}",
-             "test": f"{_URL}{_TEST_FILE}",
-         }
-         downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
-         ]
-
-     def _generate_examples(self, filepath):
-         logging.info("⏳ Generating examples from = %s", filepath)
-         with open(filepath, encoding="utf-8") as f:
-             guid = 0
-             tokens = []
-             pos_tags = []
-             chunk_tags = []
-             ner_tags = []
-             for line in f:
-                 if line.startswith("-DOCSTART-") or line == "" or line == "\n":
-                     if tokens:
-                         yield guid, {
-                             "id": str(guid),
-                             "tokens": tokens,
-                             "pos_tags": pos_tags,
-                             "chunk_tags": chunk_tags,
-                             "ner_tags": ner_tags,
-                         }
-                         guid += 1
-                         tokens = []
-                         pos_tags = []
-                         chunk_tags = []
-                         ner_tags = []
-                 else:
-                     # conll2003 tokens are space separated
-                     splits = line.split(" ")
-                     tokens.append(splits[0])
-                     pos_tags.append(splits[1])
-                     chunk_tags.append(splits[2])
-                     ner_tags.append(splits[3].rstrip())
-             # last example
-             if tokens:
-                 yield guid, {
-                     "id": str(guid),
-                     "tokens": tokens,
-                     "pos_tags": pos_tags,
-                     "chunk_tags": chunk_tags,
-                     "ner_tags": ner_tags,
-                 }
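
With the script deleted, the dataset is served from data files hosted on the repo rather than by executing the code above. A minimal usage sketch follows; it assumes only that the repo id "conllpp" still resolves on the Hub and exposes the same three splits and features the deleted script declared.

from datasets import load_dataset

# Loads from the hosted data files; no custom loading script is executed.
dataset = load_dataset("conllpp")

example = dataset["train"][0]
print(example["tokens"])    # list of strings
print(example["ner_tags"])  # list of ClassLabel ids, e.g. 3 == "B-ORG"

# Recover the label names that were hard-coded in the deleted script:
ner_names = dataset["train"].features["ner_tags"].feature.names
print([ner_names[i] for i in example["ner_tags"]])

The schema is unchanged from the script's _info(): "id", "tokens", and the "pos_tags"/"chunk_tags"/"ner_tags" ClassLabel sequences, so downstream code indexing those columns should not need modification.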