PerSets committed on
Commit
925ea45
·
1 Parent(s): 4ed8885

feat: loading script

Browse files
Files changed (1) hide show
  1. filimo2024asr.py +97 -0
filimo2024asr.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import tarfile
3
+ import datasets
4
+ import pandas as pd
5
+ from typing import Dict, List
6
+ import io
7
+ from tqdm import tqdm
8
+ import csv
9
+ import os
10
+
11
# Human-readable summary surfaced through datasets.DatasetInfo / the dataset card.
_DESCRIPTION = """
This dataset consists of about 400 hours of audio extracted from various Filimo videos in the Persian language.
Note: This dataset contains raw, unvalidated transcriptions. Users are advised to:
1. Perform their own quality assessment
2. Create their own train/validation/test splits based on their specific needs
3. Validate a subset of the data if needed for their use case
"""

# Citation text returned in DatasetInfo.citation.
_CITATION = """
Use this repo info/link for citation.
"""

# CC0 1.0 public-domain dedication.
_LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"

# Dataset repository home page on the Hugging Face Hub.
_HOMEPAGE = "https://huggingface.co/datasets/PerSets/filimo2024asr"

# Root URL for resolving raw files from the Hub repository.
_BASE_URL = "https://huggingface.co/datasets/PerSets/filimo2024asr/resolve/main/"

# Template for the sharded audio tar archives, e.g. data/unvalidated_001.tar.
_AUDIO_URL = _BASE_URL + "data/unvalidated_{shard_idx:03d}.tar"
30
+
31
class FilimoASRDataset(datasets.GeneratorBasedBuilder):
    """Loader for the PerSets/filimo2024asr corpus (~400 h of Persian speech).

    Audio lives in 33 sharded tar archives (``data/unvalidated_001.tar`` ..
    ``data/unvalidated_033.tar``) and transcriptions in a single tab-separated
    metadata file (``unvalidated.csv``) at the repository root.
    """

    # Flush generated examples to disk in batches of 1000 to bound memory use
    # while writing the Arrow file.
    DEFAULT_WRITER_BATCH_SIZE = 1000

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset metadata and feature schema."""
        return datasets.DatasetInfo(
            features=datasets.Features({
                # NOTE(review): 44_000 Hz is an unusual rate — confirm this is
                # not a typo for the standard 44_100 Hz.
                "audio": datasets.Audio(sampling_rate=44_000),
                "text": datasets.Value("string"),
                "file_name": datasets.Value("string"),
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,  # fix: constant was defined but never passed to DatasetInfo
            license=_LICENSE,
            citation=_CITATION,
            version=self.VERSION,
            description=_DESCRIPTION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the single 'unvalidated' split."""
        # Shards are numbered 001..033 inclusive.
        archive_paths = [_AUDIO_URL.format(shard_idx=i) for i in range(1, 34)]
        # Non-streaming mode: extract the archives so examples can reference
        # real local files. Streaming mode: read members directly from the tars.
        local_extracted_archive_paths = (
            dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
        )

        return [
            datasets.SplitGenerator(
                name="unvalidated",
                gen_kwargs={
                    "local_extracted_archive_paths": local_extracted_archive_paths,
                    "archives": [dl_manager.iter_archive(path) for path in archive_paths],
                    # fix: the metadata file is remote — it must be fetched to a
                    # local path first, because _generate_examples reads it with
                    # the builtin open(), which cannot handle URLs. This also
                    # makes the metadata work in streaming mode.
                    "meta_path": dl_manager.download(_BASE_URL + "unvalidated.csv"),
                },
            ),
        ]

    def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
        """Yield ``(key, example)`` pairs, keyed by the audio member path.

        Args:
            local_extracted_archive_paths: per-shard extraction dirs, or a
                falsy value when streaming (audio bytes are then read from the
                tar members directly).
            archives: one ``iter_archive`` iterator per tar shard, aligned by
                index with ``local_extracted_archive_paths``.
            meta_path: local path to the downloaded metadata file.
        """
        data_fields = list(self._info().features.keys())
        # Map mp3 file name -> metadata row, so archive members can be joined
        # to their transcriptions in O(1).
        metadata = {}
        with open(meta_path, encoding="utf-8") as f:
            # Despite the .csv extension the file is tab-separated.
            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
            for row in tqdm(reader, desc="Reading metadata..."):
                if not row["file_name"].endswith(".mp3"):
                    row["file_name"] += ".mp3"
                # Normalize the Common-Voice-style 'sentence' column to 'text'.
                if "sentence" in row:
                    row["text"] = row["sentence"]
                    del row["sentence"]
                # Guarantee every declared feature key exists on the row.
                for field in data_fields:
                    if field not in row:
                        row[field] = ""
                metadata[row["file_name"]] = row

        for i, audio_archive in enumerate(archives):
            for path, file in audio_archive:
                _, filename = os.path.split(path)
                # Archive members without a metadata row are silently skipped.
                if filename in metadata:
                    result = dict(metadata[filename])
                    # Prefer the extracted on-disk path when available
                    # (non-streaming); otherwise keep the in-archive path.
                    path = (
                        os.path.join(local_extracted_archive_paths[i], path)
                        if local_extracted_archive_paths
                        else path
                    )
                    result["audio"] = {"path": path, "bytes": file.read()}
                    result["file_name"] = path
                    yield path, result