import tensorflow as tf
from tensorflow import keras


def get_data(wavs, id_to_text, maxlen=50):
    """returns mapping of audio paths and transcription texts"""
    data = []
    for w in wavs:
        id = w.split("/")[-1].split(".")[0]
        if len(id_to_text[id]) < maxlen:
            data.append({"audio": w, "text": id_to_text[id]})
    return data
Downloading data from https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2
2748579840/2748572632 [==============================] - 57s 0us/step
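The archive above is the LJSpeech corpus. As a minimal sketch, assuming the standard LJSpeech-1.1 layout (a `wavs/` directory plus a pipe-separated `metadata.csv`) and an assumed extraction path, the `wavs` list and `id_to_text` mapping consumed by `get_data` could be built like this:

```python
import os
from glob import glob

saveto = "./datasets/LJSpeech-1.1"  # assumed extraction path; adjust as needed
wavs = glob("{}/**/*.wav".format(saveto), recursive=True)

# Each metadata.csv row is: file id | raw transcription | normalized transcription
id_to_text = {}
with open(os.path.join(saveto, "metadata.csv"), encoding="utf-8") as f:
    for line in f:
        fields = line.strip().split("|")
        id_to_text[fields[0]] = fields[2]
```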
Preprocess the dataset
class VectorizeChar:
    def __init__(self, max_len=50):
        self.vocab = (
            ["-", "#", "<", ">"]  # pad, unknown, start and end tokens
            + [chr(i + 96) for i in range(1, 27)]  # a-z
            + [" ", ".", ",", "?"]
        )
        self.max_len = max_len
        self.char_to_idx = {}
        for i, ch in enumerate(self.vocab):
            self.char_to_idx[ch] = i

    def __call__(self, text):
        text = text.lower()
        text = text[: self.max_len - 2]  # leave room for the start and end tokens
        text = "<" + text + ">"
        pad_len = self.max_len - len(text)
        # unknown characters fall back to index 1 ("#"); padding uses index 0 ("-")
        return [self.char_to_idx.get(ch, 1) for ch in text] + [0] * pad_len

    def get_vocabulary(self):
        return self.vocab
max_target_len = 200  # all transcripts in our data are < 200 characters
data = get_data(wavs, id_to_text, max_target_len)
vectorizer = VectorizeChar(max_target_len)
print("vocab size", len(vectorizer.get_vocabulary()))
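Every vectorized transcript is a fixed-length list of `max_target_len` integers: lowercased, truncated, wrapped in the `<`/`>` start and end tokens (indices 2 and 3), and right-padded with 0, the index of `"-"`. A quick check on an arbitrary sample string:

```python
sample = vectorizer("hello world")
print(len(sample))   # 200
print(sample[:14])   # [2, 11, 8, 15, 15, 18, 30, 26, 18, 21, 15, 7, 3, 0]
```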
def create_text_ds(data):
    texts = [_["text"] for _ in data]
    text_ds = [vectorizer(t) for t in texts]
    text_ds = tf.data.Dataset.from_tensor_slices(text_ds)
    return text_ds
def path_to_audio(path):
    # spectrogram using stft
    audio = tf.io.read_file(path)
    audio, _ = tf.audio.decode_wav(audio, 1)
    audio = tf.squeeze(audio, axis=-1)
    stfts = tf.signal.stft(audio, frame_length=200, frame_step=80, fft_length=256)
    x = tf.math.pow(tf.abs(stfts), 0.5)
    # normalisation
    means = tf.math.reduce_mean(x, 1, keepdims=True)
    stddevs = tf.math.reduce_std(x, 1, keepdims=True)
    x = (x - means) / stddevs
    audio_len = tf.shape(x)[0]
    # padding to 10 seconds
    pad_len = 2754
    paddings = tf.constant([[0, pad_len], [0, 0]])
    x = tf.pad(x, paddings, "CONSTANT")[:pad_len, :]
    return x
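A note on the constants: LJSpeech audio is sampled at 22,050 Hz, so a 10-second clip has 220,500 samples, and `tf.signal.stft` with `frame_length=200` and `frame_step=80` yields `1 + (220500 - 200) // 80 = 2754` frames, which is where `pad_len = 2754` comes from; `fft_length=256` gives `256 // 2 + 1 = 129` frequency bins. A quick sanity check on one file (the path below is a placeholder):

```python
x = path_to_audio("./datasets/LJSpeech-1.1/wavs/LJ001-0001.wav")
print(x.shape)  # (2754, 129)
```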
def create_audio_ds(data):
    flist = [_["audio"] for _ in data]
    audio_ds = tf.data.Dataset.from_tensor_slices(flist)
    audio_ds = audio_ds.map(path_to_audio, num_parallel_calls=tf.data.AUTOTUNE)
    return audio_ds
def create_tf_dataset(data, bs=4):
    audio_ds = create_audio_ds(data)
    text_ds = create_text_ds(data)
    ds = tf.data.Dataset.zip((audio_ds, text_ds))
    ds = ds.map(lambda x, y: {"source": x, "target": y})
    ds = ds.batch(bs)
    ds = ds.prefetch(tf.data.AUTOTUNE)
    return ds
split = int(len(data) * 0.99)
train_data = data[:split]
test_data = data[split:]
ds = create_tf_dataset(train_data, bs=64)
val_ds = create_tf_dataset(test_data, bs=4)
vocab size 34
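Given the shapes above, each training batch should carry a `(batch, time, frequency)` spectrogram tensor and a `(batch, max_target_len)` token tensor; a quick check on the first batch:

```python
batch = next(iter(ds))
print(batch["source"].shape)  # (64, 2754, 129)
print(batch["target"].shape)  # (64, 200)
```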
Callbacks to display predictions
class DisplayOutputs(keras.callbacks.Callback):
    def __init__(
        self, batch, idx_to_token, target_start_token_idx=27, target_end_token_idx=28
    ):
        """Displays a batch of outputs after every epoch

        Args:
            batch: A test batch with "source" and "target" keys
            idx_to_token: A list mapping vocabulary indices to tokens
            target_start_token_idx: Index of the start token in the target vocabulary
            target_end_token_idx: Index of the end token in the target vocabulary
        """
        self.batch = batch
        self.idx_to_token = idx_to_token
        self.target_start_token_idx = target_start_token_idx
        self.target_end_token_idx = target_end_token_idx
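With this vocabulary, the start token `"<"` sits at index 2 and the end token `">"` at index 3, so the defaults of 27 and 28 would be overridden at construction time. A sketch of how the callback might be wired up once the class is complete (the `on_epoch_end` body that actually decodes and prints predictions is not shown in this section):

```python
batch = next(iter(val_ds))
idx_to_char = vectorizer.get_vocabulary()
display_cb = DisplayOutputs(
    batch, idx_to_char, target_start_token_idx=2, target_end_token_idx=3
)
```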