import tensorflow as tf
import tensorflow_io as tfio
from tensorflow import keras
from tensorflow.keras import layers


# Custom Keras layer that converts raw audio into a log-Mel spectrogram.
# (The opening of the class definition was truncated in the source; the parameter
# list is reconstructed from the attribute assignments in __init__.)
class MelSpec(layers.Layer):
    def __init__(
        self,
        frame_length,
        frame_step,
        fft_length,
        sampling_rate,
        num_mel_channels,
        freq_min=125,
        freq_max=7600,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.frame_length = frame_length
        self.frame_step = frame_step
        self.fft_length = fft_length
        self.sampling_rate = sampling_rate
        self.num_mel_channels = num_mel_channels
        self.freq_min = freq_min
        self.freq_max = freq_max
        # Defining the Mel filterbank. This filter will be multiplied with the STFT output.
        self.mel_filterbank = tf.signal.linear_to_mel_weight_matrix(
            num_mel_bins=self.num_mel_channels,
            num_spectrogram_bins=self.frame_length // 2 + 1,
            sample_rate=self.sampling_rate,
            lower_edge_hertz=self.freq_min,
            upper_edge_hertz=self.freq_max,
        )

    def call(self, audio, training=True):
        # We only perform the transformation during training.
        if training:
            # Taking the Short-Time Fourier Transform. Ensure that the audio is padded.
            # In the paper, the STFT output is padded using the 'REFLECT' strategy.
            stft = tf.signal.stft(
                tf.squeeze(audio, -1),
                self.frame_length,
                self.frame_step,
                self.fft_length,
                pad_end=True,
            )
            # Taking the magnitude of the STFT output
            magnitude = tf.abs(stft)
            # Multiplying the Mel filterbank with the power spectrum (squared magnitude)
            # and converting the result to the dB scale
            mel = tf.matmul(tf.square(magnitude), self.mel_filterbank)
            log_mel_spec = tfio.audio.dbscale(mel, top_db=80)
            return log_mel_spec
        else:
            return audio

    def get_config(self):
        config = super(MelSpec, self).get_config()
        config.update(
            {
                "frame_length": self.frame_length,
                "frame_step": self.frame_step,
                "fft_length": self.fft_length,
                "sampling_rate": self.sampling_rate,
                "num_mel_channels": self.num_mel_channels,
                "freq_min": self.freq_min,
                "freq_max": self.freq_max,
            }
        )
        return config
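As a quick sanity check, the layer can be applied to a batch of random audio. This is a minimal sketch rather than code from the original tutorial: the constructor arguments (frame_length=1024, frame_step=256, fft_length=1024, sampling_rate=22050, num_mel_channels=80) are assumed values chosen only for illustration.

# Minimal usage sketch of MelSpec (assumed, illustrative parameter values).
mel_layer = MelSpec(
    frame_length=1024,
    frame_step=256,
    fft_length=1024,
    sampling_rate=22050,
    num_mel_channels=80,
)

# One second of fake mono audio with shape (batch, samples, 1).
fake_audio = tf.random.uniform([4, 22050, 1], minval=-1.0, maxval=1.0)

log_mel = mel_layer(fake_audio, training=True)
# With pad_end=True the STFT yields ceil(22050 / 256) = 87 frames, and the
# Mel filterbank maps the 513 spectrogram bins down to 80 Mel channels.
print(log_mel.shape)  # (4, 87, 80)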
The residual convolutional block makes extensive use of dilated convolutions and has a total receptive field of 27 timesteps per block. The dilation rates grow as powers of the kernel size, which is needed to reduce hissing noise in the output. The network proposed by the paper is as follows (a short receptive-field check is included after the code below):
Figure: ConvBlock (residual convolution block with dilated convolutions)
from tensorflow_addons import layers as addon_layers


# Creating the residual stack block
def residual_stack(input, filters):
    """Convolutional residual stack with weight normalization.

    Args:
        input: input tensor of the residual stack.
        filters: int, number of filters used by each Conv1D in the stack.

    Returns:
        Residual stack output.
    """
    c1 = addon_layers.WeightNormalization(
        layers.Conv1D(filters, 3, dilation_rate=1, padding="same"), data_init=False
    )(input)
    lrelu1 = layers.LeakyReLU()(c1)
    c2 = addon_layers.WeightNormalization(
        layers.Conv1D(filters, 3, dilation_rate=1, padding="same"), data_init=False
    )(lrelu1)
    add1 = layers.Add()([c2, input])

    lrelu2 = layers.LeakyReLU()(add1)
    c3 = addon_layers.WeightNormalization(
        layers.Conv1D(filters, 3, dilation_rate=3, padding="same"), data_init=False
    )(lrelu2)
    lrelu3 = layers.LeakyReLU()(c3)
    c4 = addon_layers.WeightNormalization(
        layers.Conv1D(filters, 3, dilation_rate=1, padding="same"), data_init=False
    )(lrelu3)
    add2 = layers.Add()([add1, c4])

    lrelu4 = layers.LeakyReLU()(add2)
    c5 = addon_layers.WeightNormalization(
        layers.Conv1D(filters, 3, dilation_rate=9, padding="same"), data_init=False
    )(lrelu4)
    lrelu5 = layers.LeakyReLU()(c5)
    c6 = addon_layers.WeightNormalization(
        layers.Conv1D(filters, 3, dilation_rate=1, padding="same"), data_init=False
    )(lrelu5)
    add3 = layers.Add()([c6, add2])

    return add3
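To relate the code back to the 27-timestep claim above: the dilated convolutions along the main path (c1, c3, c5) use dilation rates 1, 3, 9, i.e. powers of the kernel size 3, and with kernel size k = 3 their stacked receptive field is 1 + (k - 1)(1 + 3 + 9) = 27. The snippet below is a sketch for checking this arithmetic and exercising the function; the input shape and filter count are assumptions, not values taken from the tutorial.

# Receptive field of the dilated path (kernel 3, dilation rates 1, 3, 9).
kernel_size = 3
dilation_rates = [1, 3, 9]
receptive_field = 1 + (kernel_size - 1) * sum(dilation_rates)
print(receptive_field)  # 27

# Build a tiny functional model around the stack (hypothetical shapes).
# padding="same" in every Conv1D keeps the time dimension unchanged.
inputs = keras.Input(shape=(None, 64))
outputs = residual_stack(inputs, 64)
model = keras.Model(inputs, outputs)
print(model.output_shape)  # (None, None, 64)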