print(\"Our class names: {}\".format(class_names,))
|
audio_paths = []
|
labels = []
|
for label, name in enumerate(class_names):
|
print(\"Processing speaker {}\".format(name,))
|
dir_path = Path(DATASET_AUDIO_PATH) / name
|
speaker_sample_paths = [
|
os.path.join(dir_path, filepath)
|
for filepath in os.listdir(dir_path)
|
if filepath.endswith(\".wav\")
|
]
|
audio_paths += speaker_sample_paths
|
labels += [label] * len(speaker_sample_paths)
|
print(
|
\"Found {} files belonging to {} classes.\".format(len(audio_paths), len(class_names))
|
)
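Since `dir_path` is already a `pathlib.Path`, the same collection can be written with `glob`. This variant (an illustrative alternative, not the notebook's code) also sorts the paths, which makes runs reproducible across filesystems where `os.listdir` order is arbitrary:

    # Equivalent collection using pathlib only (illustrative):
    speaker_sample_paths = [str(p) for p in sorted(dir_path.glob("*.wav"))]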
# Shuffle paths and labels with the same seed so pairs stay aligned
rng = np.random.RandomState(SHUFFLE_SEED)
rng.shuffle(audio_paths)
rng = np.random.RandomState(SHUFFLE_SEED)
rng.shuffle(labels)
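Re-seeding before each shuffle applies the identical permutation to both lists, so every path keeps its label. A standalone sanity check (illustrative, not part of the notebook):

    import numpy as np

    paths = ["a.wav", "b.wav", "c.wav", "d.wav"]
    tags = [0, 1, 2, 3]
    for seq in (paths, tags):
        np.random.RandomState(43).shuffle(seq)  # same seed -> same permutation
    print(paths, tags)  # both lists are permuted identically, pairs intact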
# Split into training and validation
num_val_samples = int(VALID_SPLIT * len(audio_paths))
print("Using {} files for training.".format(len(audio_paths) - num_val_samples))
train_audio_paths = audio_paths[:-num_val_samples]
train_labels = labels[:-num_val_samples]

print("Using {} files for validation.".format(num_val_samples))
valid_audio_paths = audio_paths[-num_val_samples:]
valid_labels = labels[-num_val_samples:]
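Note that the negative-index slices assume a non-zero `VALID_SPLIT`: `audio_paths[:-0]` would be empty. A quick consistency check (my addition, not in the notebook):

    assert num_val_samples > 0
    assert len(train_audio_paths) + len(valid_audio_paths) == len(audio_paths)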
# Create 2 datasets, one for training and the other for validation
train_ds = paths_and_labels_to_dataset(train_audio_paths, train_labels)
train_ds = train_ds.shuffle(buffer_size=BATCH_SIZE * 8, seed=SHUFFLE_SEED).batch(
    BATCH_SIZE
)

valid_ds = paths_and_labels_to_dataset(valid_audio_paths, valid_labels)
# Note: the validation pipeline uses a hard-coded batch size of 32, not BATCH_SIZE
valid_ds = valid_ds.shuffle(buffer_size=32 * 8, seed=SHUFFLE_SEED).batch(32)
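`paths_and_labels_to_dataset` is defined earlier in the tutorial. For reference, a minimal sketch of such a helper (an assumption about its body, not copied from this section): it reads each WAV file into a mono tensor and zips it with its label.

    def paths_and_labels_to_dataset(audio_paths, labels):
        """Build a tf.data.Dataset of (audio, label) pairs from file paths."""
        path_ds = tf.data.Dataset.from_tensor_slices(audio_paths)
        audio_ds = path_ds.map(
            lambda path: tf.audio.decode_wav(
                tf.io.read_file(path), desired_channels=1
            )[0]  # keep the samples, drop the sample rate
        )
        label_ds = tf.data.Dataset.from_tensor_slices(labels)
        return tf.data.Dataset.zip((audio_ds, label_ds))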
# Add noise to the training set
train_ds = train_ds.map(
    lambda x, y: (add_noise(x, noises, scale=SCALE), y),
    num_parallel_calls=tf.data.AUTOTUNE,
)

# Transform audio wave to the frequency domain using `audio_to_fft`
train_ds = train_ds.map(
    lambda x, y: (audio_to_fft(x), y), num_parallel_calls=tf.data.AUTOTUNE
)
train_ds = train_ds.prefetch(tf.data.AUTOTUNE)

valid_ds = valid_ds.map(
    lambda x, y: (audio_to_fft(x), y), num_parallel_calls=tf.data.AUTOTUNE
)
valid_ds = valid_ds.prefetch(tf.data.AUTOTUNE)
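`audio_to_fft`, also defined earlier, converts each waveform batch to a magnitude spectrum. A sketch of the usual construction (my assumption, not this section's code): cast to complex, take the FFT, and keep only the first half of the bins, since the spectrum of a real-valued signal is symmetric.

    def audio_to_fft(audio):
        audio = tf.squeeze(audio, axis=-1)             # (batch, samples)
        fft = tf.signal.fft(tf.cast(audio, tf.complex64))
        fft = tf.expand_dims(fft, axis=-1)             # (batch, samples, 1)
        # Only the positive frequencies carry information for real input
        return tf.math.abs(fft[:, : audio.shape[1] // 2, :])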
Console output from the steps above:

Our class names: ['Julia_Gillard', 'Jens_Stoltenberg', 'Nelson_Mandela', 'Magaret_Tarcher', 'Benjamin_Netanyau']
Processing speaker Julia_Gillard
Processing speaker Jens_Stoltenberg
Processing speaker Nelson_Mandela
Processing speaker Magaret_Tarcher
Processing speaker Benjamin_Netanyau
Found 7501 files belonging to 5 classes.
Using 6751 files for training.
Using 750 files for validation.
Model Definition
def residual_block(x, filters, conv_num=3, activation="relu"):
    # Shortcut
    s = keras.layers.Conv1D(filters, 1, padding="same")(x)
    for i in range(conv_num - 1):
        x = keras.layers.Conv1D(filters, 3, padding="same")(x)
        x = keras.layers.Activation(activation)(x)
    x = keras.layers.Conv1D(filters, 3, padding="same")(x)
    x = keras.layers.Add()([x, s])
    x = keras.layers.Activation(activation)(x)
    return keras.layers.MaxPool1D(pool_size=2, strides=2)(x)
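The 1x1 convolution on the shortcut path projects the input to `filters` channels so the `Add()` shapes match, and the closing max-pool halves the time dimension. A quick shape check (illustrative):

    dummy = keras.layers.Input(shape=(8000, 1))
    out = residual_block(dummy, 16, 2)
    print(out.shape)  # (None, 4000, 16): time halved, channels set by `filters`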
def build_model(input_shape, num_classes):
    inputs = keras.layers.Input(shape=input_shape, name="input")

    x = residual_block(inputs, 16, 2)
    x = residual_block(x, 32, 2)
    x = residual_block(x, 64, 3)
    x = residual_block(x, 128, 3)
    x = residual_block(x, 128, 3)

    x = keras.layers.AveragePooling1D(pool_size=3, strides=3)(x)
    x = keras.layers.Flatten()(x)
    x = keras.layers.Dense(256, activation="relu")(x)
    x = keras.layers.Dense(128, activation="relu")(x)

    outputs = keras.layers.Dense(num_classes, activation="softmax", name="output")(x)

    return keras.models.Model(inputs=inputs, outputs=outputs)
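Assuming the tutorial's 1-second clips at 16 kHz, the half-spectrum FFT gives an 8000-bin input; the five pooled blocks reduce that to 250 steps and the average pool to 83 before flattening. Wiring the model up might look like this (a sketch: `SAMPLING_RATE` comes from the tutorial's earlier configuration section, and the compile settings are a standard Keras classification setup, not copied from this section):

    # Shape arithmetic: 8000 -> 4000 -> 2000 -> 1000 -> 500 -> 250 (five MaxPool1D),
    # then AveragePooling1D(3, 3): (250 - 3) // 3 + 1 = 83 steps before Flatten.
    model = build_model((SAMPLING_RATE // 2, 1), len(class_names))
    model.compile(
        optimizer="Adam",
        loss="sparse_categorical_crossentropy",  # labels are integer class ids
        metrics=["accuracy"],
    )
    model.summary()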