lhoestq (HF Staff) committed
Commit 03a8274 · verified · 1 Parent(s): 166e002

Delete loading script

Files changed (1)
  1. minds14.py +0 -170
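
With the loading script removed, the dataset is presumably served from data files hosted in the repository, so it can be loaded directly with the datasets library and no custom script. A minimal sketch, assuming the PolyAI/minds14 repo id and the en-US config name taken from the deleted script below:

    from datasets import load_dataset

    # Assumed repo id and config name, for illustration only; the config names
    # come from the _ALL_CONFIGS list in the deleted script.
    minds = load_dataset("PolyAI/minds14", name="en-US", split="train")
    print(minds[0]["transcription"])
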
minds14.py DELETED
@@ -1,170 +0,0 @@
- # coding=utf-8
- # Copyright 2022 The PolyAI and HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import csv
- import os
-
- import datasets
-
- logger = datasets.logging.get_logger(__name__)
-
-
- """ MInDS-14 Dataset"""
-
- _CITATION = """\
- @article{gerz2021multilingual,
- title={Multilingual and cross-lingual intent detection from spoken data},
- author={Gerz, Daniela and Su, Pei-Hao and Kusztos, Razvan and Mondal, Avishek and Lis, Michal and Singhal, Eshan and Mrk{\v{s}}i{\'c}, Nikola and Wen, Tsung-Hsien and Vuli{\'c}, Ivan},
- journal={arXiv preprint arXiv:2104.08524},
- year={2021}
- }
- """
-
- _DESCRIPTION = """\
- MINDS-14 is a training and evaluation resource for the intent
- detection task with spoken data. It covers 14
- intents extracted from a commercial system
- in the e-banking domain, associated with spoken examples in 14 diverse language varieties.
- """
-
- _ALL_CONFIGS = sorted([
-     "cs-CZ", "de-DE", "en-AU", "en-GB", "en-US", "es-ES", "fr-FR", "it-IT", "ko-KR", "nl-NL", "pl-PL", "pt-PT", "ru-RU", "zh-CN"
- ])
-
-
- _DESCRIPTION = "MINDS-14 is a dataset for the intent detection task with spoken data. It covers 14 intents extracted from a commercial system in the e-banking domain, associated with spoken examples in 14 diverse language varieties."
-
- _HOMEPAGE_URL = "https://arxiv.org/abs/2104.08524"
-
- _DATA_URL = "data/MInDS-14.zip"
-
-
- class Minds14Config(datasets.BuilderConfig):
-     """BuilderConfig for MInDS-14."""
-
-     def __init__(
-         self, name, description, homepage, data_url
-     ):
-         super(Minds14Config, self).__init__(
-             name=name,
-             version=datasets.Version("1.0.0", ""),
-             description=description,
-         )
-         self.name = name
-         self.description = description
-         self.homepage = homepage
-         self.data_url = data_url
-
-
- def _build_config(name):
-     return Minds14Config(
-         name=name,
-         description=_DESCRIPTION,
-         homepage=_HOMEPAGE_URL,
-         data_url=_DATA_URL,
-     )
-
-
- class Minds14(datasets.GeneratorBasedBuilder):
-
-     DEFAULT_WRITER_BATCH_SIZE = 1000
-     BUILDER_CONFIGS = [_build_config(name) for name in _ALL_CONFIGS + ["all"]]
-
-     def _info(self):
-         langs = _ALL_CONFIGS
-         features = datasets.Features(
-             {
-                 "path": datasets.Value("string"),
-                 "audio": datasets.Audio(sampling_rate=8_000),
-                 "transcription": datasets.Value("string"),
-                 "english_transcription": datasets.Value("string"),
-                 "intent_class": datasets.ClassLabel(
-                     names=[
-                         "abroad",
-                         "address",
-                         "app_error",
-                         "atm_limit",
-                         "balance",
-                         "business_loan",
-                         "card_issues",
-                         "cash_deposit",
-                         "direct_debit",
-                         "freeze",
-                         "high_value_payment",
-                         "joint_account",
-                         "latest_transactions",
-                         "pay_bill",
-                     ]
-                 ),
-                 "lang_id": datasets.ClassLabel(names=langs),
-             }
-         )
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             supervised_keys=("audio", "transcription"),
-             homepage=self.config.homepage,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         langs = (
-             _ALL_CONFIGS
-             if self.config.name == "all"
-             else [self.config.name]
-         )
-
-         archive_path = dl_manager.download_and_extract(self.config.data_url)
-         audio_path = dl_manager.extract(
-             os.path.join(archive_path, "MInDS-14", "audio.zip")
-         )
-         text_path = dl_manager.extract(
-             os.path.join(archive_path, "MInDS-14", "text.zip")
-         )
-
-         text_path = {l: os.path.join(text_path, f"{l}.csv") for l in langs}
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "audio_path": audio_path,
-                     "text_paths": text_path,
-                 },
-             )
-         ]
-
-     def _generate_examples(self, audio_path, text_paths):
-         key = 0
-         for lang in text_paths.keys():
-             text_path = text_paths[lang]
-             with open(text_path, encoding="utf-8") as csv_file:
-                 csv_reader = csv.reader(csv_file, delimiter=",", skipinitialspace=True)
-                 next(csv_reader)
-                 for row in csv_reader:
-                     file_path, transcription, english_transcription, intent_class = row
-
-                     file_path = os.path.join(audio_path, *file_path.split("/"))
-                     yield key, {
-                         "path": file_path,
-                         "audio": file_path,
-                         "transcription": transcription,
-                         "english_transcription": english_transcription,
-                         "intent_class": intent_class.lower(),
-                         "lang_id": _ALL_CONFIGS.index(lang),
-                     }
-                     key += 1
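
For reference, intent_class and lang_id were defined as datasets.ClassLabel features in the script above, so their integer values map back to the intent and locale names listed there. A short sketch, assuming the same PolyAI/minds14 repo id used for illustration earlier and that the hosted data files keep the same features:

    from datasets import load_dataset

    # Assumed repo id and config name, for illustration only.
    minds = load_dataset("PolyAI/minds14", name="en-US", split="train")

    # ClassLabel features keep their name lists, so the stored integer ids can
    # be converted back to the labels defined in the deleted script.
    intent_names = minds.features["intent_class"].names
    lang_names = minds.features["lang_id"].names
    example = minds[0]
    print(intent_names[example["intent_class"]], lang_names[example["lang_id"]])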